1 From ca6bbb723880e91d006de6cc485259da988859aa Mon Sep 17 00:00:00 2001
2 From: John Daley <johndale@cisco.com>
3 Date: Tue, 5 Apr 2016 15:19:06 -0700
4 Subject: [PATCH 3/3] enic: Optimization of Tx path to reduce Host CPU
7 Optimizations and cleanup:
8 - flatten packet send path
9 - flatten mbuf free path
10 - disable CQ entry writing and use CQ messages instead
11 - use rte_mempool_put_bulk() to bulk return freed mbufs (see the sketch below)
12 - remove unnecessary fields from the vnic_wq_buf struct; use a contiguous array of
13 cache-aligned, evenly divisible elements with no next pointers.
14 - use local variables inside the per-packet loop instead of fields in structs.
15 - factor bookkeeping out of the per-packet Tx loop where possible
16 (removing several conditionals)
17 - put Tx and Rx code in 1 file (enic_rxtx.c)
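
The bulk mbuf return mentioned above takes this shape (a minimal sketch, not
the literal driver loop; nb_to_free and bufs[] stand in for the WQ state the
real code walks):

	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	struct rte_mempool *pool = NULL;
	unsigned int nb_free = 0, i;

	for (i = 0; i < nb_to_free; i++) {
		m = bufs[i];
		/* Batch mbufs from one mempool; flush when the pool changes. */
		if (pool != NULL && m->pool != pool) {
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			nb_free = 0;
		}
		pool = m->pool;
		free[nb_free++] = m;
	}
	if (nb_free != 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);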
19 Reviewed-by: Nelson Escobar <neescoba@cisco.com>
20 Signed-off-by: John Daley <johndale@cisco.com>
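---

A quick illustration of the modular ring-index helpers added to enic.h below
(a sketch for reviewers, not driver code; it assumes a 16-descriptor ring and
that enic_ring_incr() wraps to 0 at desc_count, like buf_idx_incr()):

	uint32_t n = 16;                  /* ring size (desc_count) */
	uint32_t tail = 14, completed = 1;

	/* Entries 14, 15, 0, 1 completed: the distance is inclusive, so +1. */
	uint32_t nb_done = enic_ring_sub(n, tail, completed) + 1;   /* 4 */

	/* Move the tail past the freed entries, wrapping at the ring end. */
	tail = enic_ring_add(n, tail, nb_done);                     /* 2 */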
22 drivers/net/enic/Makefile | 2 +-
23 drivers/net/enic/base/enic_vnic_wq.h | 79 ------
24 drivers/net/enic/base/vnic_cq.h | 37 +--
25 drivers/net/enic/base/vnic_rq.h | 2 +-
26 drivers/net/enic/base/vnic_wq.c | 89 +++---
27 drivers/net/enic/base/vnic_wq.h | 113 +-------
28 drivers/net/enic/enic.h | 27 +-
29 drivers/net/enic/enic_ethdev.c | 67 +----
30 drivers/net/enic/enic_main.c | 132 +++------
31 drivers/net/enic/enic_res.h | 81 +-----
32 drivers/net/enic/enic_rx.c | 361 -------------------------
33 drivers/net/enic/enic_rxtx.c | 505 +++++++++++++++++++++++++++++++++++
34 12 files changed, 635 insertions(+), 860 deletions(-)
35 delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h
36 delete mode 100644 drivers/net/enic/enic_rx.c
37 create mode 100644 drivers/net/enic/enic_rxtx.c
39 diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
40 index f316274..3926b79 100644
41 --- a/drivers/net/enic/Makefile
42 +++ b/drivers/net/enic/Makefile
43 @@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
45 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
46 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
47 -SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
48 +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
49 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
50 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
51 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
52 diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
53 deleted file mode 100644
54 index b019109..0000000
55 --- a/drivers/net/enic/base/enic_vnic_wq.h
59 - * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
60 - * Copyright 2007 Nuova Systems, Inc. All rights reserved.
62 - * Copyright (c) 2015, Cisco Systems, Inc.
63 - * All rights reserved.
65 - * Redistribution and use in source and binary forms, with or without
66 - * modification, are permitted provided that the following conditions
69 - * 1. Redistributions of source code must retain the above copyright
70 - * notice, this list of conditions and the following disclaimer.
72 - * 2. Redistributions in binary form must reproduce the above copyright
73 - * notice, this list of conditions and the following disclaimer in
74 - * the documentation and/or other materials provided with the
77 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
80 - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
81 - * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
82 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
83 - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
84 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
85 - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 - * POSSIBILITY OF SUCH DAMAGE.
92 -#ifndef _ENIC_VNIC_WQ_H_
93 -#define _ENIC_VNIC_WQ_H_
95 -#include "vnic_dev.h"
98 -static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
100 - struct vnic_wq_buf *buf = wq->to_use;
102 - /* Adding write memory barrier prevents compiler and/or CPU
103 - * reordering, thus avoiding descriptor posting before
104 - * descriptor is initialized. Otherwise, hardware can read
105 - * stale descriptor fields.
108 - iowrite32(buf->index, &wq->ctrl->posted_index);
111 -static inline void enic_vnic_post_wq(struct vnic_wq *wq,
112 - void *os_buf, dma_addr_t dma_addr,
113 - unsigned int len, int sop,
114 - uint8_t desc_skip_cnt, uint8_t cq_entry,
115 - uint8_t compressed_send, uint64_t wrid)
117 - struct vnic_wq_buf *buf = wq->to_use;
120 - buf->cq_entry = cq_entry;
121 - buf->compressed_send = compressed_send;
122 - buf->desc_skip_cnt = desc_skip_cnt;
123 - buf->os_buf = os_buf;
124 - buf->dma_addr = dma_addr;
129 - wq->ring.desc_avail -= desc_skip_cnt;
133 - enic_vnic_post_wq_index(wq);
136 -#endif /* _ENIC_VNIC_WQ_H_ */
137 diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
138 index 922391b..ffc1aaa 100644
139 --- a/drivers/net/enic/base/vnic_cq.h
140 +++ b/drivers/net/enic/base/vnic_cq.h
141 @@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
142 u8 type, u16 q_number, u16 completed_index, void *opaque),
145 - struct cq_desc *cq_desc;
146 + struct cq_desc *cq_desc, *cq_desc_last;
147 unsigned int work_done = 0;
148 u16 q_number, completed_index;
150 - struct rte_mbuf **rx_pkts = opaque;
152 + u8 type, color, type_color;
154 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
155 cq->ring.desc_size * cq->to_clean);
156 - cq_desc_dec(cq_desc, &type, &color,
157 - &q_number, &completed_index);
159 + type_color = cq_desc->type_color;
160 + color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
161 + if (color == cq->last_color)
164 while (color != cq->last_color) {
166 - opaque = (void *)&(rx_pkts[work_done]);
167 + cq_desc_last = cq_desc;
169 - ret = (*q_service)(cq->vdev, cq_desc, type,
170 - q_number, completed_index, opaque);
172 if (cq->to_clean == cq->ring.desc_count) {
174 cq->last_color = cq->last_color ? 0 : 1;
178 + if (work_done >= work_to_do)
179 + break;
181 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
182 cq->ring.desc_size * cq->to_clean);
183 - cq_desc_dec(cq_desc, &type, &color,
184 - &q_number, &completed_index);
188 - if (work_done >= work_to_do)
190 + type_color = cq_desc->type_color;
191 + color = (type_color >> CQ_DESC_COLOR_SHIFT)
192 + & CQ_DESC_COLOR_MASK;
196 + cq_desc_dec(cq_desc_last, &type, &color,
197 + &q_number, &completed_index);
199 + (*q_service)(cq->vdev, cq_desc, type,
200 + q_number, completed_index, opaque);
204 diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
205 index e083ccc..424415c 100644
206 --- a/drivers/net/enic/base/vnic_rq.h
207 +++ b/drivers/net/enic/base/vnic_rq.h
208 @@ -74,7 +74,7 @@ struct vnic_rq {
209 struct vnic_dev_ring ring;
210 struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
211 unsigned int mbuf_next_idx; /* next mb to consume */
214 unsigned int pkts_outstanding;
216 uint16_t rx_free_thresh;
217 diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
218 index a3ef417..ccbbd61 100644
219 --- a/drivers/net/enic/base/vnic_wq.c
220 +++ b/drivers/net/enic/base/vnic_wq.c
221 @@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
223 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
225 - struct vnic_wq_buf *buf;
226 - unsigned int i, j, count = wq->ring.desc_count;
227 - unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
229 - for (i = 0; i < blks; i++) {
230 - wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
235 - for (i = 0; i < blks; i++) {
237 - for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
238 - buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
239 - buf->desc = (u8 *)wq->ring.descs +
240 - wq->ring.desc_size * buf->index;
241 - if (buf->index + 1 == count) {
242 - buf->next = wq->bufs[0];
244 - } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
245 - buf->next = wq->bufs[i + 1];
247 - buf->next = buf + 1;
253 - wq->to_use = wq->to_clean = wq->bufs[0];
255 + unsigned int count = wq->ring.desc_count;
256 + /* Allocate the mbuf ring */
257 + wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
258 + sizeof(struct vnic_wq_buf) * count,
259 + RTE_CACHE_LINE_SIZE, wq->socket_id);
262 + if (wq->bufs == NULL)
267 void vnic_wq_free(struct vnic_wq *wq)
269 struct vnic_dev *vdev;
274 vnic_dev_free_desc_ring(vdev, &wq->ring);
276 - for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
278 - kfree(wq->bufs[i]);
279 - wq->bufs[i] = NULL;
283 + rte_free(wq->bufs);
287 -int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
288 - unsigned int desc_size)
292 - mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
294 - mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
295 - VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
301 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
302 unsigned int desc_count, unsigned int desc_size)
303 @@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
304 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
305 iowrite32(0, &wq->ctrl->error_status);
307 - wq->to_use = wq->to_clean =
308 - &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
309 - [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
310 + wq->head_idx = fetch_index;
311 + wq->tail_idx = wq->head_idx;
314 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
315 @@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
316 vnic_wq_init_start(wq, cq_index, 0, 0,
317 error_interrupt_enable,
318 error_interrupt_offset);
319 + wq->last_completed_index = 0;
322 void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
323 @@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq)
327 +static inline uint32_t
328 +buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
331 + if (unlikely(idx == n_descriptors))
336 void vnic_wq_clean(struct vnic_wq *wq,
337 - void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
338 + void (*buf_clean)(struct vnic_wq_buf *buf))
340 struct vnic_wq_buf *buf;
341 + unsigned int to_clean = wq->tail_idx;
343 - buf = wq->to_clean;
344 + buf = &wq->bufs[to_clean];
346 while (vnic_wq_desc_used(wq) > 0) {
348 - (*buf_clean)(wq, buf);
350 + to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
352 - buf = wq->to_clean = buf->next;
353 + buf = &wq->bufs[to_clean];
354 wq->ring.desc_avail++;
357 - wq->to_use = wq->to_clean = wq->bufs[0];
361 iowrite32(0, &wq->ctrl->fetch_index);
362 iowrite32(0, &wq->ctrl->posted_index);
363 diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
364 index c23de62..37c3ff9 100644
365 --- a/drivers/net/enic/base/vnic_wq.h
366 +++ b/drivers/net/enic/base/vnic_wq.h
367 @@ -64,42 +64,23 @@ struct vnic_wq_ctrl {
373 - struct vnic_wq_buf *next;
374 - dma_addr_t dma_addr;
377 - unsigned int index;
380 - uint64_t wr_id; /* Cookie */
381 - uint8_t cq_entry; /* Gets completion event from hw */
382 - uint8_t desc_skip_cnt; /* Num descs to occupy */
383 - uint8_t compressed_send; /* Both hdr and payload in one desc */
384 + struct rte_mempool *pool;
388 -/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
389 -#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
390 -#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
391 -#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
392 - ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
393 - VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
394 -#define VNIC_WQ_BUF_BLK_SZ(entries) \
395 - (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
396 -#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
397 - DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
398 -#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
402 struct vnic_dev *vdev;
403 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
404 struct vnic_dev_ring ring;
405 - struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
406 - struct vnic_wq_buf *to_use;
407 - struct vnic_wq_buf *to_clean;
408 - unsigned int pkts_outstanding;
409 + struct vnic_wq_buf *bufs;
410 + unsigned int head_idx;
411 + unsigned int tail_idx;
412 unsigned int socket_id;
413 + const struct rte_memzone *cqmsg_rz;
414 + uint16_t last_completed_index;
417 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
418 @@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
419 return wq->ring.desc_count - wq->ring.desc_avail - 1;
422 -static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
424 - return wq->to_use->desc;
427 #define PI_LOG2_CACHE_LINE_SIZE 5
428 #define PI_INDEX_BITS 12
429 #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
430 @@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
431 PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
434 -static inline void vnic_wq_post(struct vnic_wq *wq,
435 - void *os_buf, dma_addr_t dma_addr,
436 - unsigned int len, int sop, int eop,
437 - uint8_t desc_skip_cnt, uint8_t cq_entry,
438 - uint8_t compressed_send, uint64_t wrid)
440 - struct vnic_wq_buf *buf = wq->to_use;
443 - buf->cq_entry = cq_entry;
444 - buf->compressed_send = compressed_send;
445 - buf->desc_skip_cnt = desc_skip_cnt;
446 - buf->os_buf = os_buf;
447 - buf->dma_addr = dma_addr;
454 - uint64_t wr = vnic_cached_posted_index(dma_addr, len,
457 - /* Adding write memory barrier prevents compiler and/or CPU
458 - * reordering, thus avoiding descriptor posting before
459 - * descriptor is initialized. Otherwise, hardware can read
460 - * stale descriptor fields.
464 - /* Intel chipsets seem to limit the rate of PIOs that we can
465 - * push on the bus. Thus, it is very important to do a single
466 - * 64 bit write here. With two 32-bit writes, my maximum
467 - * pkt/sec rate was cut almost in half. -AJF
469 - iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
471 - iowrite32(buf->index, &wq->ctrl->posted_index);
476 - wq->ring.desc_avail -= desc_skip_cnt;
479 -static inline void vnic_wq_service(struct vnic_wq *wq,
480 - struct cq_desc *cq_desc, u16 completed_index,
481 - void (*buf_service)(struct vnic_wq *wq,
482 - struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
485 - struct vnic_wq_buf *buf;
487 - buf = wq->to_clean;
490 - (*buf_service)(wq, cq_desc, buf, opaque);
492 - wq->ring.desc_avail++;
494 - wq->to_clean = buf->next;
496 - if (buf->index == completed_index)
499 - buf = wq->to_clean;
503 void vnic_wq_free(struct vnic_wq *wq);
504 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
505 unsigned int desc_count, unsigned int desc_size);
506 @@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
507 void vnic_wq_enable(struct vnic_wq *wq);
508 int vnic_wq_disable(struct vnic_wq *wq);
509 void vnic_wq_clean(struct vnic_wq *wq,
510 - void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
511 -int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
512 - unsigned int desc_size);
513 + void (*buf_clean)(struct vnic_wq_buf *buf));
515 #endif /* _VNIC_WQ_H_ */
516 diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
517 index 8c914f5..43b82a6 100644
518 --- a/drivers/net/enic/enic.h
519 +++ b/drivers/net/enic/enic.h
520 @@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
521 return (struct enic *)eth_dev->data->dev_private;
524 +static inline uint32_t
525 +enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
527 + uint32_t d = i0 + i1;
528 + d -= (d >= n_descriptors) ? n_descriptors : 0;
532 +static inline uint32_t
533 +enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
535 + int32_t d = i1 - i0;
536 + return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
539 +static inline uint32_t
540 +enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
543 + if (unlikely(idx == n_descriptors))
548 #define RTE_LIBRTE_ENIC_ASSERT_ENABLE
549 #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
550 #define ASSERT(x) do { \
551 @@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic);
552 extern void enic_clsf_destroy(struct enic *enic);
553 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
556 +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
558 #endif /* _ENIC_H_ */
559 diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
560 index 6bea940..697ff82 100644
561 --- a/drivers/net/enic/enic_ethdev.c
562 +++ b/drivers/net/enic/enic_ethdev.c
563 @@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
564 enic_del_mac_address(enic);
568 -static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
572 - unsigned int frags;
573 - unsigned int pkt_len;
574 - unsigned int seg_len;
575 - unsigned int inc_len;
576 - unsigned int nb_segs;
577 - struct rte_mbuf *tx_pkt, *next_tx_pkt;
578 - struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
579 - struct enic *enic = vnic_dev_priv(wq->vdev);
580 - unsigned short vlan_id;
581 - unsigned short ol_flags;
582 - uint8_t last_seg, eop;
583 - unsigned int host_tx_descs = 0;
585 - for (index = 0; index < nb_pkts; index++) {
586 - tx_pkt = *tx_pkts++;
588 - nb_segs = tx_pkt->nb_segs;
589 - if (nb_segs > vnic_wq_desc_avail(wq)) {
591 - enic_post_wq_index(wq);
593 - /* wq cleanup and try again */
594 - if (!enic_cleanup_wq(enic, wq) ||
595 - (nb_segs > vnic_wq_desc_avail(wq))) {
600 - pkt_len = tx_pkt->pkt_len;
601 - vlan_id = tx_pkt->vlan_tci;
602 - ol_flags = tx_pkt->ol_flags;
603 - for (frags = 0; inc_len < pkt_len; frags++) {
606 - next_tx_pkt = tx_pkt->next;
607 - seg_len = tx_pkt->data_len;
608 - inc_len += seg_len;
613 - if ((pkt_len == inc_len) || !next_tx_pkt) {
615 - /* post if last packet in batch or > thresh */
616 - if ((index == (nb_pkts - 1)) ||
617 - (host_tx_descs > ENIC_TX_POST_THRESH)) {
622 - enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
623 - !frags, eop, last_seg, ol_flags, vlan_id);
624 - tx_pkt = next_tx_pkt;
628 - enic_cleanup_wq(enic, wq);
632 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
633 .dev_configure = enicpmd_dev_configure,
634 .dev_start = enicpmd_dev_start,
635 @@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
636 enic->rte_dev = eth_dev;
637 eth_dev->dev_ops = &enicpmd_eth_dev_ops;
638 eth_dev->rx_pkt_burst = &enic_recv_pkts;
639 - eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
640 + eth_dev->tx_pkt_burst = &enic_xmit_pkts;
642 pdev = eth_dev->pci_dev;
643 rte_eth_copy_pci_info(eth_dev, pdev);
644 diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
645 index 646d87f..ba73604 100644
646 --- a/drivers/net/enic/enic_main.c
647 +++ b/drivers/net/enic/enic_main.c
652 -#include <rte_memzone.h>
653 #include <rte_malloc.h>
654 #include <rte_mbuf.h>
655 #include <rte_string_fns.h>
656 #include <rte_ethdev.h>
657 +#include <rte_memzone.h>
659 #include "enic_compat.h"
663 #include "vnic_intr.h"
664 #include "vnic_nic.h"
665 -#include "enic_vnic_wq.h"
667 static inline struct rte_mbuf *
668 rte_rxmbuf_alloc(struct rte_mempool *mp)
669 @@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
674 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
676 vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
679 -static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
680 +static void enic_free_wq_buf(struct vnic_wq_buf *buf)
682 - struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
683 + struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
685 rte_mempool_put(mbuf->pool, mbuf);
686 - buf->os_buf = NULL;
689 -static void enic_wq_free_buf(struct vnic_wq *wq,
690 - __rte_unused struct cq_desc *cq_desc,
691 - struct vnic_wq_buf *buf,
692 - __rte_unused void *opaque)
694 - enic_free_wq_buf(wq, buf);
697 -static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
698 - __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
700 - struct enic *enic = vnic_dev_priv(vdev);
702 - vnic_wq_service(&enic->wq[q_number], cq_desc,
703 - completed_index, enic_wq_free_buf,
710 static void enic_log_q_error(struct enic *enic)
711 @@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic)
715 -unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
717 - unsigned int cq = enic_cq_wq(enic, wq->index);
719 - /* Return the work done */
720 - return vnic_cq_service(&enic->cq[cq],
721 - -1 /*wq_work_to_do*/, enic_wq_service, NULL);
724 -void enic_post_wq_index(struct vnic_wq *wq)
726 - enic_vnic_post_wq_index(wq);
729 -void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
730 - struct rte_mbuf *tx_pkt, unsigned short len,
731 - uint8_t sop, uint8_t eop, uint8_t cq_entry,
732 - uint16_t ol_flags, uint16_t vlan_tag)
734 - struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
736 - uint8_t vlan_tag_insert = 0;
737 - uint64_t bus_addr = (dma_addr_t)
738 - (tx_pkt->buf_physaddr + tx_pkt->data_off);
741 - if (ol_flags & PKT_TX_VLAN_PKT)
742 - vlan_tag_insert = 1;
744 - if (enic->hw_ip_checksum) {
745 - if (ol_flags & PKT_TX_IP_CKSUM)
746 - mss |= ENIC_CALC_IP_CKSUM;
748 - if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
749 - mss |= ENIC_CALC_TCP_UDP_CKSUM;
753 - wq_enet_desc_enc(desc,
757 - 0 /* header_length */,
758 - 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
761 - 0 /* fcoe_encap */,
766 - enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
768 - 1 /*desc_skip_cnt*/,
770 - 0 /*compressed send*/,
774 void enic_dev_stats_clear(struct enic *enic)
776 @@ -297,12 +217,28 @@ void enic_init_vnic_resources(struct enic *enic)
777 unsigned int error_interrupt_enable = 1;
778 unsigned int error_interrupt_offset = 0;
779 unsigned int index = 0;
780 + unsigned int cq_idx;
782 + vnic_dev_stats_clear(enic->vdev);
784 for (index = 0; index < enic->rq_count; index++) {
785 vnic_rq_init(&enic->rq[index],
786 enic_cq_rq(enic, index),
787 error_interrupt_enable,
788 error_interrupt_offset);
790 + cq_idx = enic_cq_rq(enic, index);
791 + vnic_cq_init(&enic->cq[cq_idx],
792 + 0 /* flow_control_enable */,
793 + 1 /* color_enable */,
796 + 1 /* cq_tail_color */,
797 + 0 /* interrupt_enable */,
798 + 1 /* cq_entry_enable */,
799 + 0 /* cq_message_enable */,
800 + 0 /* interrupt offset */,
801 + 0 /* cq_message_addr */);
804 for (index = 0; index < enic->wq_count; index++) {
805 @@ -310,22 +246,19 @@ void enic_init_vnic_resources(struct enic *enic)
806 enic_cq_wq(enic, index),
807 error_interrupt_enable,
808 error_interrupt_offset);
811 - vnic_dev_stats_clear(enic->vdev);
813 - for (index = 0; index < enic->cq_count; index++) {
814 - vnic_cq_init(&enic->cq[index],
815 + cq_idx = enic_cq_wq(enic, index);
816 + vnic_cq_init(&enic->cq[cq_idx],
817 0 /* flow_control_enable */,
818 1 /* color_enable */,
821 1 /* cq_tail_color */,
822 0 /* interrupt_enable */,
823 - 1 /* cq_entry_enable */,
824 - 0 /* cq_message_enable */,
825 + 0 /* cq_entry_enable */,
826 + 1 /* cq_message_enable */,
827 0 /* interrupt offset */,
828 - 0 /* cq_message_addr */);
829 + (u64)enic->wq[index].cqmsg_rz->phys_addr);
832 vnic_intr_init(&enic->intr,
833 @@ -569,6 +502,7 @@ void enic_free_wq(void *txq)
834 struct vnic_wq *wq = (struct vnic_wq *)txq;
835 struct enic *enic = vnic_dev_priv(wq->vdev);
837 + rte_memzone_free(wq->cqmsg_rz);
839 vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
841 @@ -579,6 +513,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
843 struct vnic_wq *wq = &enic->wq[queue_idx];
844 unsigned int cq_index = enic_cq_wq(enic, queue_idx);
845 + char name[NAME_MAX];
846 + static int instance;
848 wq->socket_id = socket_id;
850 @@ -614,6 +550,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
851 dev_err(enic, "error in allocation of cq for wq\n");
854 + /* set up CQ message */
855 + snprintf((char *)name, sizeof(name),
856 + "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
859 + wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
869 diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
870 index 00fa71d..3e1bdf5 100644
871 --- a/drivers/net/enic/enic_res.h
872 +++ b/drivers/net/enic/enic_res.h
875 #define ENIC_NON_TSO_MAX_DESC 16
876 #define ENIC_DEFAULT_RX_FREE_THRESH 32
877 -#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
878 +#define ENIC_TX_XMIT_MAX 64
880 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
882 -static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
883 - void *os_buf, dma_addr_t dma_addr, unsigned int len,
884 - unsigned int mss_or_csum_offset, unsigned int hdr_len,
885 - int vlan_tag_insert, unsigned int vlan_tag,
886 - int offload_mode, int cq_entry, int sop, int eop, int loopback)
888 - struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
889 - u8 desc_skip_cnt = 1;
890 - u8 compressed_send = 0;
893 - wq_enet_desc_enc(desc,
894 - (u64)dma_addr | VNIC_PADDR_TARGET,
896 - (u16)mss_or_csum_offset,
897 - (u16)hdr_len, (u8)offload_mode,
898 - (u8)eop, (u8)cq_entry,
899 - 0, /* fcoe_encap */
900 - (u8)vlan_tag_insert,
904 - vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
905 - (u8)cq_entry, compressed_send, wrid);
908 -static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
909 - void *os_buf, dma_addr_t dma_addr, unsigned int len,
910 - int eop, int loopback)
912 - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
914 - eop, 0 /* !SOP */, eop, loopback);
917 -static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
918 - dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
919 - unsigned int vlan_tag, int eop, int loopback)
921 - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
922 - 0, 0, vlan_tag_insert, vlan_tag,
923 - WQ_ENET_OFFLOAD_MODE_CSUM,
924 - eop, 1 /* SOP */, eop, loopback);
927 -static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
928 - void *os_buf, dma_addr_t dma_addr, unsigned int len,
929 - int ip_csum, int tcpudp_csum, int vlan_tag_insert,
930 - unsigned int vlan_tag, int eop, int loopback)
932 - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
933 - (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
934 - 0, vlan_tag_insert, vlan_tag,
935 - WQ_ENET_OFFLOAD_MODE_CSUM,
936 - eop, 1 /* SOP */, eop, loopback);
939 -static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
940 - void *os_buf, dma_addr_t dma_addr, unsigned int len,
941 - unsigned int csum_offset, unsigned int hdr_len,
942 - int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
944 - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
945 - csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
946 - WQ_ENET_OFFLOAD_MODE_CSUM_L4,
947 - eop, 1 /* SOP */, eop, loopback);
950 -static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
951 - void *os_buf, dma_addr_t dma_addr, unsigned int len,
952 - unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
953 - unsigned int vlan_tag, int eop, int loopback)
955 - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
956 - mss, hdr_len, vlan_tag_insert, vlan_tag,
957 - WQ_ENET_OFFLOAD_MODE_TSO,
958 - eop, 1 /* SOP */, eop, loopback);
963 int enic_get_vnic_config(struct enic *);
964 diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
965 deleted file mode 100644
966 index 39bb55c..0000000
967 --- a/drivers/net/enic/enic_rx.c
971 - * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
972 - * Copyright 2007 Nuova Systems, Inc. All rights reserved.
974 - * Copyright (c) 2014, Cisco Systems, Inc.
975 - * All rights reserved.
977 - * Redistribution and use in source and binary forms, with or without
978 - * modification, are permitted provided that the following conditions
981 - * 1. Redistributions of source code must retain the above copyright
982 - * notice, this list of conditions and the following disclaimer.
984 - * 2. Redistributions in binary form must reproduce the above copyright
985 - * notice, this list of conditions and the following disclaimer in
986 - * the documentation and/or other materials provided with the
989 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
990 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
991 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
992 - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
993 - * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
994 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
995 - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
996 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
997 - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
998 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
999 - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1000 - * POSSIBILITY OF SUCH DAMAGE.
1004 -#include <rte_mbuf.h>
1005 -#include <rte_ethdev.h>
1006 -#include <rte_prefetch.h>
1008 -#include "enic_compat.h"
1009 -#include "rq_enet_desc.h"
1012 -#define RTE_PMD_USE_PREFETCH
1014 -#ifdef RTE_PMD_USE_PREFETCH
1016 - * Prefetch a cache line into all cache levels.
1018 -#define rte_enic_prefetch(p) rte_prefetch0(p)
1020 -#define rte_enic_prefetch(p) do {} while (0)
1023 -#ifdef RTE_PMD_PACKET_PREFETCH
1024 -#define rte_packet_prefetch(p) rte_prefetch1(p)
1026 -#define rte_packet_prefetch(p) do {} while (0)
1029 -static inline struct rte_mbuf *
1030 -rte_rxmbuf_alloc(struct rte_mempool *mp)
1032 - struct rte_mbuf *m;
1034 - m = __rte_mbuf_raw_alloc(mp);
1035 - __rte_mbuf_sanity_check_raw(m, 0);
1039 -static inline uint16_t
1040 -enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
1042 - return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
1045 -static inline uint16_t
1046 -enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
1048 - return(le16_to_cpu(crd->bytes_written_flags) &
1049 - ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
1052 -static inline uint8_t
1053 -enic_cq_rx_desc_packet_error(uint16_t bwflags)
1055 - return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
1056 - CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
1059 -static inline uint8_t
1060 -enic_cq_rx_desc_eop(uint16_t ciflags)
1062 - return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
1063 - == CQ_ENET_RQ_DESC_FLAGS_EOP;
1066 -static inline uint8_t
1067 -enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
1069 - return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
1070 - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
1071 - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
1074 -static inline uint8_t
1075 -enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
1077 - return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
1078 - CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
1081 -static inline uint8_t
1082 -enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
1084 - return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
1085 - CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
1088 -static inline uint8_t
1089 -enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
1091 - return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
1092 - CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
1095 -static inline uint32_t
1096 -enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
1098 - return le32_to_cpu(cqrd->rss_hash);
1101 -static inline uint16_t
1102 -enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
1104 - return le16_to_cpu(cqrd->vlan);
1107 -static inline uint16_t
1108 -enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
1110 - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1111 - return le16_to_cpu(cqrd->bytes_written_flags) &
1112 - CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
1115 -static inline uint8_t
1116 -enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
1118 - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1121 - uint64_t pkt_err_flags = 0;
1123 - bwflags = enic_cq_rx_desc_bwflags(cqrd);
1124 - if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
1125 - pkt_err_flags = PKT_RX_MAC_ERR;
1128 - *pkt_err_flags_out = pkt_err_flags;
1133 - * Lookup table to translate RX CQ flags to mbuf flags.
1135 -static inline uint32_t
1136 -enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
1138 - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1139 - uint8_t cqrd_flags = cqrd->flags;
1140 - static const uint32_t cq_type_table[128] __rte_cache_aligned = {
1141 - [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
1142 - [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1143 - | RTE_PTYPE_L4_UDP,
1144 - [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1145 - | RTE_PTYPE_L4_TCP,
1146 - [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1147 - | RTE_PTYPE_L4_FRAG,
1148 - [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
1149 - [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1150 - | RTE_PTYPE_L4_UDP,
1151 - [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1152 - | RTE_PTYPE_L4_TCP,
1153 - [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1154 - | RTE_PTYPE_L4_FRAG,
1155 - /* All others reserved */
1157 - cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
1158 - | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
1159 - | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
1160 - return cq_type_table[cqrd_flags];
1164 -enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
1166 - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1167 - uint16_t ciflags, bwflags, pkt_flags = 0;
1168 - ciflags = enic_cq_rx_desc_ciflags(cqrd);
1169 - bwflags = enic_cq_rx_desc_bwflags(cqrd);
1171 - mbuf->ol_flags = 0;
1173 - /* flags are meaningless if !EOP */
1174 - if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
1175 - goto mbuf_flags_done;
1177 - /* VLAN stripping */
1178 - if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
1179 - pkt_flags |= PKT_RX_VLAN_PKT;
1180 - mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
1182 - mbuf->vlan_tci = 0;
1186 - if (enic_cq_rx_desc_rss_type(cqrd)) {
1187 - pkt_flags |= PKT_RX_RSS_HASH;
1188 - mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
1191 - /* checksum flags */
1192 - if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
1193 - (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
1194 - if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
1195 - pkt_flags |= PKT_RX_IP_CKSUM_BAD;
1196 - if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
1197 - if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
1198 - pkt_flags |= PKT_RX_L4_CKSUM_BAD;
1203 - mbuf->ol_flags = pkt_flags;
1206 -static inline uint32_t
1207 -enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
1209 - uint32_t d = i0 + i1;
1210 - ASSERT(i0 < n_descriptors);
1211 - ASSERT(i1 < n_descriptors);
1212 - d -= (d >= n_descriptors) ? n_descriptors : 0;
1218 -enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1221 - struct vnic_rq *rq = rx_queue;
1222 - struct enic *enic = vnic_dev_priv(rq->vdev);
1223 - unsigned int rx_id;
1224 - struct rte_mbuf *nmb, *rxmb;
1225 - uint16_t nb_rx = 0;
1227 - struct vnic_cq *cq;
1228 - volatile struct cq_desc *cqd_ptr;
1231 - cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1232 - rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
1233 - cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1235 - nb_hold = rq->rx_nb_hold; /* mbufs held by software */
1237 - while (nb_rx < nb_pkts) {
1238 - volatile struct rq_enet_desc *rqd_ptr;
1239 - dma_addr_t dma_addr;
1240 - struct cq_desc cqd;
1241 - uint64_t ol_err_flags;
1242 - uint8_t packet_error;
1244 - /* Check for pkts available */
1245 - color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
1246 - & CQ_DESC_COLOR_MASK;
1247 - if (color == cq->last_color)
1250 - /* Get the cq descriptor and rq pointer */
1252 - rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
1254 - /* allocate a new mbuf */
1255 - nmb = rte_rxmbuf_alloc(rq->mp);
1256 - if (nmb == NULL) {
1257 - dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
1258 - enic->port_id, (unsigned)rq->index);
1259 - rte_eth_devices[enic->port_id].
1260 - data->rx_mbuf_alloc_failed++;
1264 - /* A packet error means descriptor and data are untrusted */
1265 - packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
1267 - /* Get the mbuf to return and replace with one just allocated */
1268 - rxmb = rq->mbuf_ring[rx_id];
1269 - rq->mbuf_ring[rx_id] = nmb;
1271 - /* Increment cqd, rqd, mbuf_table index */
1273 - if (unlikely(rx_id == rq->ring.desc_count)) {
1275 - cq->last_color = cq->last_color ? 0 : 1;
1278 - /* Prefetch next mbuf & desc while processing current one */
1279 - cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1280 - rte_enic_prefetch(cqd_ptr);
1281 - rte_enic_prefetch(rq->mbuf_ring[rx_id]);
1282 - rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
1285 - /* Push descriptor for newly allocated mbuf */
1286 - dma_addr = (dma_addr_t)(nmb->buf_physaddr
1287 - + RTE_PKTMBUF_HEADROOM);
1288 - rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
1289 - rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
1290 - - RTE_PKTMBUF_HEADROOM);
1292 - /* Fill in the rest of the mbuf */
1293 - rxmb->data_off = RTE_PKTMBUF_HEADROOM;
1294 - rxmb->nb_segs = 1;
1295 - rxmb->next = NULL;
1296 - rxmb->port = enic->port_id;
1297 - if (!packet_error) {
1298 - rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
1299 - rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
1300 - enic_cq_rx_to_pkt_flags(&cqd, rxmb);
1302 - rxmb->pkt_len = 0;
1303 - rxmb->packet_type = 0;
1304 - rxmb->ol_flags = 0;
1306 - rxmb->data_len = rxmb->pkt_len;
1308 - /* prefetch mbuf data for caller */
1309 - rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
1310 - RTE_PKTMBUF_HEADROOM));
1312 - /* store the mbuf address into the next entry of the array */
1313 - rx_pkts[nb_rx++] = rxmb;
1317 - cq->to_clean = rx_id;
1319 - if (nb_hold > rq->rx_free_thresh) {
1320 - rq->posted_index = enic_ring_add(rq->ring.desc_count,
1321 - rq->posted_index, nb_hold);
1324 - iowrite32(rq->posted_index, &rq->ctrl->posted_index);
1327 - rq->rx_nb_hold = nb_hold;
1331 diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
1332 new file mode 100644
1333 index 0000000..71ca34e
1335 +++ b/drivers/net/enic/enic_rxtx.c
1338 + * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
1339 + * Copyright 2007 Nuova Systems, Inc. All rights reserved.
1341 + * Copyright (c) 2016, Cisco Systems, Inc.
1342 + * All rights reserved.
1344 + * Redistribution and use in source and binary forms, with or without
1345 + * modification, are permitted provided that the following conditions
1348 + * 1. Redistributions of source code must retain the above copyright
1349 + * notice, this list of conditions and the following disclaimer.
1351 + * 2. Redistributions in binary form must reproduce the above copyright
1352 + * notice, this list of conditions and the following disclaimer in
1353 + * the documentation and/or other materials provided with the
1356 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1357 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1358 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
1359 + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
1360 + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
1361 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
1362 + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1363 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
1364 + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1365 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
1366 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1367 + * POSSIBILITY OF SUCH DAMAGE.
1371 +#include <rte_mbuf.h>
1372 +#include <rte_ethdev.h>
1373 +#include <rte_prefetch.h>
1374 +#include <rte_memzone.h>
1376 +#include "enic_compat.h"
1377 +#include "rq_enet_desc.h"
1380 +#define RTE_PMD_USE_PREFETCH
1382 +#ifdef RTE_PMD_USE_PREFETCH
1384 + * Prefetch a cache line into all cache levels.
1386 +#define rte_enic_prefetch(p) rte_prefetch0(p)
1388 +#define rte_enic_prefetch(p) do {} while (0)
1391 +#ifdef RTE_PMD_PACKET_PREFETCH
1392 +#define rte_packet_prefetch(p) rte_prefetch1(p)
1394 +#define rte_packet_prefetch(p) do {} while (0)
1397 +static inline struct rte_mbuf *
1398 +rte_rxmbuf_alloc(struct rte_mempool *mp)
1400 + struct rte_mbuf *m;
1402 + m = __rte_mbuf_raw_alloc(mp);
1403 + __rte_mbuf_sanity_check_raw(m, 0);
1407 +static inline uint16_t
1408 +enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
1410 + return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
1413 +static inline uint16_t
1414 +enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
1416 + return(le16_to_cpu(crd->bytes_written_flags) &
1417 + ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
1420 +static inline uint8_t
1421 +enic_cq_rx_desc_packet_error(uint16_t bwflags)
1423 + return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
1424 + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
1427 +static inline uint8_t
1428 +enic_cq_rx_desc_eop(uint16_t ciflags)
1430 + return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
1431 + == CQ_ENET_RQ_DESC_FLAGS_EOP;
1434 +static inline uint8_t
1435 +enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
1437 + return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
1438 + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
1439 + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
1442 +static inline uint8_t
1443 +enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
1445 + return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
1446 + CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
1449 +static inline uint8_t
1450 +enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
1452 + return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
1453 + CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
1456 +static inline uint8_t
1457 +enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
1459 + return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
1460 + CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
1463 +static inline uint32_t
1464 +enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
1466 + return le32_to_cpu(cqrd->rss_hash);
1469 +static inline uint16_t
1470 +enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
1472 + return le16_to_cpu(cqrd->vlan);
1475 +static inline uint16_t
1476 +enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
1478 + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1479 + return le16_to_cpu(cqrd->bytes_written_flags) &
1480 + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
1483 +static inline uint8_t
1484 +enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
1486 + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1489 + uint64_t pkt_err_flags = 0;
1491 + bwflags = enic_cq_rx_desc_bwflags(cqrd);
1492 + if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
1493 + pkt_err_flags = PKT_RX_MAC_ERR;
1496 + *pkt_err_flags_out = pkt_err_flags;
1501 + * Lookup table to translate RX CQ flags to mbuf flags.
1503 +static inline uint32_t
1504 +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
1506 + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1507 + uint8_t cqrd_flags = cqrd->flags;
1508 + static const uint32_t cq_type_table[128] __rte_cache_aligned = {
1509 + [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
1510 + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1511 + | RTE_PTYPE_L4_UDP,
1512 + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1513 + | RTE_PTYPE_L4_TCP,
1514 + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1515 + | RTE_PTYPE_L4_FRAG,
1516 + [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
1517 + [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1518 + | RTE_PTYPE_L4_UDP,
1519 + [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1520 + | RTE_PTYPE_L4_TCP,
1521 + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1522 + | RTE_PTYPE_L4_FRAG,
1523 + /* All others reserved */
1525 + cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
1526 + | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
1527 + | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
1528 + return cq_type_table[cqrd_flags];
1532 +enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
1534 + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1535 + uint16_t ciflags, bwflags, pkt_flags = 0;
1536 + ciflags = enic_cq_rx_desc_ciflags(cqrd);
1537 + bwflags = enic_cq_rx_desc_bwflags(cqrd);
1539 + mbuf->ol_flags = 0;
1541 + /* flags are meaningless if !EOP */
1542 + if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
1543 + goto mbuf_flags_done;
1545 + /* VLAN stripping */
1546 + if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
1547 + pkt_flags |= PKT_RX_VLAN_PKT;
1548 + mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
1550 + mbuf->vlan_tci = 0;
1554 + if (enic_cq_rx_desc_rss_type(cqrd)) {
1555 + pkt_flags |= PKT_RX_RSS_HASH;
1556 + mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
1559 + /* checksum flags */
1560 + if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
1561 + (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
1562 + if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
1563 + pkt_flags |= PKT_RX_IP_CKSUM_BAD;
1564 + if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
1565 + if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
1566 + pkt_flags |= PKT_RX_L4_CKSUM_BAD;
1571 + mbuf->ol_flags = pkt_flags;
1575 +enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1578 + struct vnic_rq *rq = rx_queue;
1579 + struct enic *enic = vnic_dev_priv(rq->vdev);
1580 + unsigned int rx_id;
1581 + struct rte_mbuf *nmb, *rxmb;
1582 + uint16_t nb_rx = 0;
1584 + struct vnic_cq *cq;
1585 + volatile struct cq_desc *cqd_ptr;
1588 + cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1589 + rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
1590 + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1592 + nb_hold = rq->rx_nb_hold; /* mbufs held by software */
1594 + while (nb_rx < nb_pkts) {
1595 + volatile struct rq_enet_desc *rqd_ptr;
1596 + dma_addr_t dma_addr;
1597 + struct cq_desc cqd;
1598 + uint64_t ol_err_flags;
1599 + uint8_t packet_error;
1601 + /* Check for pkts available */
1602 + color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
1603 + & CQ_DESC_COLOR_MASK;
1604 + if (color == cq->last_color)
1607 + /* Get the cq descriptor and rq pointer */
1609 + rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
1611 + /* allocate a new mbuf */
1612 + nmb = rte_rxmbuf_alloc(rq->mp);
1613 + if (nmb == NULL) {
1614 + dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
1615 + enic->port_id, (unsigned)rq->index);
1616 + rte_eth_devices[enic->port_id].
1617 + data->rx_mbuf_alloc_failed++;
1621 + /* A packet error means descriptor and data are untrusted */
1622 + packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
1624 + /* Get the mbuf to return and replace with one just allocated */
1625 + rxmb = rq->mbuf_ring[rx_id];
1626 + rq->mbuf_ring[rx_id] = nmb;
1628 + /* Increment cqd, rqd, mbuf_table index */
1630 + if (unlikely(rx_id == rq->ring.desc_count)) {
1632 + cq->last_color = cq->last_color ? 0 : 1;
1635 + /* Prefetch next mbuf & desc while processing current one */
1636 + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1637 + rte_enic_prefetch(cqd_ptr);
1638 + rte_enic_prefetch(rq->mbuf_ring[rx_id]);
1639 + rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
1642 + /* Push descriptor for newly allocated mbuf */
1643 + dma_addr = (dma_addr_t)(nmb->buf_physaddr
1644 + + RTE_PKTMBUF_HEADROOM);
1645 + rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
1646 + rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
1647 + - RTE_PKTMBUF_HEADROOM);
1649 + /* Fill in the rest of the mbuf */
1650 + rxmb->data_off = RTE_PKTMBUF_HEADROOM;
1651 + rxmb->nb_segs = 1;
1652 + rxmb->next = NULL;
1653 + rxmb->port = enic->port_id;
1654 + if (!packet_error) {
1655 + rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
1656 + rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
1657 + enic_cq_rx_to_pkt_flags(&cqd, rxmb);
1659 + rxmb->pkt_len = 0;
1660 + rxmb->packet_type = 0;
1661 + rxmb->ol_flags = 0;
1663 + rxmb->data_len = rxmb->pkt_len;
1665 + /* prefetch mbuf data for caller */
1666 + rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
1667 + RTE_PKTMBUF_HEADROOM));
1669 + /* store the mbuf address into the next entry of the array */
1670 + rx_pkts[nb_rx++] = rxmb;
1674 + cq->to_clean = rx_id;
1676 + if (nb_hold > rq->rx_free_thresh) {
1677 + rq->posted_index = enic_ring_add(rq->ring.desc_count,
1678 + rq->posted_index, nb_hold);
1681 + iowrite32(rq->posted_index, &rq->ctrl->posted_index);
1684 + rq->rx_nb_hold = nb_hold;
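+/* Reclaim WQ descriptors from tail_idx through completed_index, returning
+ * the transmitted mbufs to their mempools in bulk (same-pool runs are
+ * batched into one rte_mempool_put_bulk() call). */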
1689 +static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
1691 + struct vnic_wq_buf *buf;
1692 + struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
1693 + unsigned int nb_to_free, nb_free = 0, i;
1694 + struct rte_mempool *pool;
1695 + unsigned int tail_idx;
1696 + unsigned int desc_count = wq->ring.desc_count;
1698 + nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) + 1;
1700 + tail_idx = wq->tail_idx;
1701 + buf = &wq->bufs[tail_idx];
1702 + pool = ((struct rte_mbuf *)buf->mb)->pool;
1703 + for (i = 0; i < nb_to_free; i++) {
1704 + buf = &wq->bufs[tail_idx];
1705 + m = (struct rte_mbuf *)(buf->mb);
1706 + if (likely(m->pool == pool)) {
1707 + ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
1708 + free[nb_free++] = m;
1710 + rte_mempool_put_bulk(pool, (void **)free, nb_free);
1715 + tail_idx = enic_ring_incr(desc_count, tail_idx);
1719 + rte_mempool_put_bulk(pool, (void **)free, nb_free);
1721 + wq->tail_idx = tail_idx;
1722 + wq->ring.desc_avail += nb_to_free;
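+/* CQ entry writes are disabled for the WQ; the adapter instead posts the
+ * most recently completed index to the cqmsg memzone, which is polled
+ * here instead of servicing per-packet CQ entries. */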
1725 +unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
1727 + u16 completed_index;
1729 + completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
1731 + if (wq->last_completed_index != completed_index) {
1732 + enic_free_wq_bufs(wq, completed_index);
1733 + wq->last_completed_index = completed_index;
1738 +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
1742 + unsigned int pkt_len, data_len;
1743 + unsigned int nb_segs;
1744 + struct rte_mbuf *tx_pkt;
1745 + struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
1746 + struct enic *enic = vnic_dev_priv(wq->vdev);
1747 + unsigned short vlan_id;
1748 + unsigned short ol_flags;
1749 + unsigned int wq_desc_avail;
1751 + struct vnic_wq_buf *buf;
1752 + unsigned int hw_ip_cksum_enabled;
1753 + unsigned int desc_count;
1754 + struct wq_enet_desc *descs, *desc_p, desc_tmp;
1756 + uint8_t vlan_tag_insert;
1758 + uint64_t bus_addr;
1760 + enic_cleanup_wq(enic, wq);
1761 + wq_desc_avail = vnic_wq_desc_avail(wq);
1762 + head_idx = wq->head_idx;
1763 + desc_count = wq->ring.desc_count;
1765 + nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
1767 + hw_ip_cksum_enabled = enic->hw_ip_checksum;
1768 + for (index = 0; index < nb_pkts; index++) {
1769 + tx_pkt = *tx_pkts++;
1770 + nb_segs = tx_pkt->nb_segs;
1771 + if (nb_segs > wq_desc_avail) {
1777 + pkt_len = tx_pkt->pkt_len;
1778 + data_len = tx_pkt->data_len;
1779 + vlan_id = tx_pkt->vlan_tci;
1780 + ol_flags = tx_pkt->ol_flags;
1783 + vlan_tag_insert = 0;
1784 + bus_addr = (dma_addr_t)
1785 + (tx_pkt->buf_physaddr + tx_pkt->data_off);
1787 + descs = (struct wq_enet_desc *)wq->ring.descs;
1788 + desc_p = descs + head_idx;
1790 + eop = (data_len == pkt_len);
1792 + if (ol_flags & PKT_TX_VLAN_PKT)
1793 + vlan_tag_insert = 1;
1795 + if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM))
1796 + mss |= ENIC_CALC_IP_CKSUM;
1798 + if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM))
1799 + mss |= ENIC_CALC_TCP_UDP_CKSUM;
1801 + wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
1802 + eop, 0, vlan_tag_insert, vlan_id, 0);
1804 + *desc_p = desc_tmp;
1805 + buf = &wq->bufs[head_idx];
1806 + buf->mb = (void *)tx_pkt;
1807 + head_idx = enic_ring_incr(desc_count, head_idx);
1811 + for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt = tx_pkt->next) {
1813 + data_len = tx_pkt->data_len;
1815 + if (tx_pkt->next == NULL)
1816 + eop = 1;
1817 + desc_p = descs + head_idx;
1818 + bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
1819 + + tx_pkt->data_off);
1820 + wq_enet_desc_enc((struct wq_enet_desc *)
1821 + &desc_tmp, bus_addr, data_len,
1822 + mss, 0, 0, eop, eop, 0,
1823 + vlan_tag_insert, vlan_id, 0);
1825 + *desc_p = desc_tmp;
1826 + buf = &wq->bufs[head_idx];
1827 + buf->mb = (void *)tx_pkt;
1828 + head_idx = enic_ring_incr(desc_count, head_idx);
1835 + iowrite32(head_idx, &wq->ctrl->posted_index);
1837 + wq->ring.desc_avail = wq_desc_avail;
1838 + wq->head_idx = head_idx;