1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <unistd.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_cycles.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_interrupts.h>
51 #include <rte_pci.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_ip.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71 #include <rte_errno.h>
72
73 #include "base/vmxnet3_defs.h"
74 #include "vmxnet3_ring.h"
75
76 #include "vmxnet3_logs.h"
77 #include "vmxnet3_ethdev.h"
78
79 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
80
81 static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
82 static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
83 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
84 static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
85 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
86 #endif
87
88 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
89 static void
90 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
91 {
92         uint32_t avail = 0;
93
94         if (rxq == NULL)
95                 return;
96
97         PMD_RX_LOG(DEBUG,
98                    "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
99                    rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
100         PMD_RX_LOG(DEBUG,
101                    "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
102                    (unsigned long)rxq->cmd_ring[0].basePA,
103                    (unsigned long)rxq->cmd_ring[1].basePA,
104                    (unsigned long)rxq->comp_ring.basePA);
105
106         avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
107         PMD_RX_LOG(DEBUG,
108                    "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
109                    (uint32_t)rxq->cmd_ring[0].size, avail,
110                    rxq->comp_ring.next2proc,
111                    rxq->cmd_ring[0].size - avail);
112
113         avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
114         PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
115                    (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
116                    rxq->cmd_ring[1].size - avail);
117
118 }
119
120 static void
121 vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
122 {
123         uint32_t avail = 0;
124
125         if (txq == NULL)
126                 return;
127
128         PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
129                    txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
130         PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
131                    (unsigned long)txq->cmd_ring.basePA,
132                    (unsigned long)txq->comp_ring.basePA,
133                    (unsigned long)txq->data_ring.basePA);
134
135         avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
136         PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
137                    (uint32_t)txq->cmd_ring.size, avail,
138                    txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
139 }
140 #endif
141
142 static void
143 vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
144 {
145         while (ring->next2comp != ring->next2fill) {
146                 /* No need to worry about desc ownership, device is quiesced by now. */
147                 vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
148
149                 if (buf_info->m) {
150                         rte_pktmbuf_free(buf_info->m);
151                         buf_info->m = NULL;
152                         buf_info->bufPA = 0;
153                         buf_info->len = 0;
154                 }
155                 vmxnet3_cmd_ring_adv_next2comp(ring);
156         }
157 }
158
159 static void
160 vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
161 {
162         uint32_t i;
163
164         for (i = 0; i < ring->size; i++) {
165                 /* No need to worry about desc ownership, device is quiesced by now. */
166                 vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
167
168                 if (buf_info->m) {
169                         rte_pktmbuf_free_seg(buf_info->m);
170                         buf_info->m = NULL;
171                         buf_info->bufPA = 0;
172                         buf_info->len = 0;
173                 }
174                 vmxnet3_cmd_ring_adv_next2comp(ring);
175         }
176 }
177
178 static void
179 vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
180 {
181         rte_free(ring->buf_info);
182         ring->buf_info = NULL;
183 }
184
185 void
186 vmxnet3_dev_tx_queue_release(void *txq)
187 {
188         vmxnet3_tx_queue_t *tq = txq;
189
190         if (tq != NULL) {
191                 /* Release mbufs */
192                 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
193                 /* Release the cmd_ring */
194                 vmxnet3_cmd_ring_release(&tq->cmd_ring);
195                 /* Release the memzone */
196                 rte_memzone_free(tq->mz);
197         }
198 }
199
200 void
201 vmxnet3_dev_rx_queue_release(void *rxq)
202 {
203         int i;
204         vmxnet3_rx_queue_t *rq = rxq;
205
206         if (rq != NULL) {
207                 /* Release mbufs */
208                 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
209                         vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
210
211                 /* Release both the cmd_rings */
212                 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
213                         vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
214
215                 /* Release the memzone */
216                 rte_memzone_free(rq->mz);
217         }
218 }
219
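/*
 * Queue reset helpers: the two functions below free any mbufs still
 * attached to the rings, rewind the fill/completion indices, restore the
 * initial generation bits and zero the descriptor memory so a stopped
 * queue can be brought back up from a clean state.
 */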
220 static void
221 vmxnet3_dev_tx_queue_reset(void *txq)
222 {
223         vmxnet3_tx_queue_t *tq = txq;
224         struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
225         struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
226         struct vmxnet3_data_ring *data_ring = &tq->data_ring;
227         int size;
228
229         if (tq != NULL) {
230                 /* Release the cmd_ring mbufs */
231                 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
232         }
233
234         /* Tx vmxnet rings structure initialization */
235         ring->next2fill = 0;
236         ring->next2comp = 0;
237         ring->gen = VMXNET3_INIT_GEN;
238         comp_ring->next2proc = 0;
239         comp_ring->gen = VMXNET3_INIT_GEN;
240
241         size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
242         size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
243         size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
244
245         memset(ring->base, 0, size);
246 }
247
248 static void
249 vmxnet3_dev_rx_queue_reset(void *rxq)
250 {
251         int i;
252         vmxnet3_rx_queue_t *rq = rxq;
253         struct vmxnet3_cmd_ring *ring0, *ring1;
254         struct vmxnet3_comp_ring *comp_ring;
255         int size;
256
257         if (rq != NULL) {
258                 /* Release both the cmd_rings mbufs */
259                 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
260                         vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
261         }
262
263         ring0 = &rq->cmd_ring[0];
264         ring1 = &rq->cmd_ring[1];
265         comp_ring = &rq->comp_ring;
266
267         /* Rx vmxnet rings structure initialization */
268         ring0->next2fill = 0;
269         ring1->next2fill = 0;
270         ring0->next2comp = 0;
271         ring1->next2comp = 0;
272         ring0->gen = VMXNET3_INIT_GEN;
273         ring1->gen = VMXNET3_INIT_GEN;
274         comp_ring->next2proc = 0;
275         comp_ring->gen = VMXNET3_INIT_GEN;
276
277         size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
278         size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
279
280         memset(ring0->base, 0, size);
281 }
282
283 void
284 vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
285 {
286         unsigned i;
287
288         PMD_INIT_FUNC_TRACE();
289
290         for (i = 0; i < dev->data->nb_tx_queues; i++) {
291                 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
292
293                 if (txq != NULL) {
294                         txq->stopped = TRUE;
295                         vmxnet3_dev_tx_queue_reset(txq);
296                 }
297         }
298
299         for (i = 0; i < dev->data->nb_rx_queues; i++) {
300                 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
301
302                 if (rxq != NULL) {
303                         rxq->stopped = TRUE;
304                         vmxnet3_dev_rx_queue_reset(rxq);
305                 }
306         }
307 }
308
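/*
 * Reclaim the Tx command-ring descriptors of one completed packet.
 * eop_idx is the index reported by the completion descriptor: free the
 * mbuf stored in that slot's buf_info, advance next2comp across every
 * descriptor belonging to the packet and return how many descriptors
 * were released.
 */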
309 static int
310 vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
311 {
312         int completed = 0;
313         struct rte_mbuf *mbuf;
314
315         /* Release cmd_ring descriptor and free mbuf */
316         RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
317
318         mbuf = txq->cmd_ring.buf_info[eop_idx].m;
319         if (mbuf == NULL)
320                 rte_panic("EOP desc does not point to a valid mbuf");
321         rte_pktmbuf_free(mbuf);
322
323         txq->cmd_ring.buf_info[eop_idx].m = NULL;
324
325         while (txq->cmd_ring.next2comp != eop_idx) {
326                 /* no out-of-order completion */
327                 RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
328                 vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
329                 completed++;
330         }
331
332         /* Mark the txd for which tcd was generated as completed */
333         vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
334
335         return completed + 1;
336 }
337
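/*
 * Scan the Tx completion ring while the descriptor generation bit matches
 * the ring's current generation. Each completion carries the EOP index of
 * a finished packet, which vmxnet3_unmap_pkt() uses to release the
 * corresponding command-ring descriptors and mbuf.
 */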
338 static void
339 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
340 {
341         int completed = 0;
342         vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
343         struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
344                 (comp_ring->base + comp_ring->next2proc);
345
346         while (tcd->gen == comp_ring->gen) {
347                 completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);
348
349                 vmxnet3_comp_ring_adv_next2proc(comp_ring);
350                 tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
351                                                     comp_ring->next2proc);
352         }
353
354         PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
355 }
356
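/*
 * Burst transmit. After reclaiming completed descriptors, each packet is
 * checked against the free command-ring space; small single-segment
 * frames are copied into the Tx data ring, and every mbuf segment gets
 * one descriptor. The SOP descriptor is written with the previous
 * generation bit and flipped only after the whole chain and the offload
 * fields are in place, so the device never sees a partially built packet.
 * The TXPROD doorbell is rung once the number of deferred packets reaches
 * the txThreshold value from the shared queue control area.
 */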
357 uint16_t
358 vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
359                   uint16_t nb_pkts)
360 {
361         uint16_t nb_tx;
362         vmxnet3_tx_queue_t *txq = tx_queue;
363         struct vmxnet3_hw *hw = txq->hw;
364         Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
365         uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);
366
367         if (unlikely(txq->stopped)) {
368                 PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
369                 return 0;
370         }
371
372         /* Free up the comp_descriptors aggressively */
373         vmxnet3_tq_tx_complete(txq);
374
375         nb_tx = 0;
376         while (nb_tx < nb_pkts) {
377                 Vmxnet3_GenericDesc *gdesc;
378                 vmxnet3_buf_info_t *tbi;
379                 uint32_t first2fill, avail, dw2;
380                 struct rte_mbuf *txm = tx_pkts[nb_tx];
381                 struct rte_mbuf *m_seg = txm;
382                 int copy_size = 0;
383                 bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
384                 /* # of descriptors needed for a packet. */
385                 unsigned count = txm->nb_segs;
386
387                 avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
388                 if (count > avail) {
389                         /* Is command ring full? */
390                         if (unlikely(avail == 0)) {
391                                 PMD_TX_LOG(DEBUG, "No free ring descriptors");
392                                 txq->stats.tx_ring_full++;
393                                 txq->stats.drop_total += (nb_pkts - nb_tx);
394                                 break;
395                         }
396
397                         /* Command ring is not full but cannot handle the
398                          * multi-segmented packet. Let's try the next packet
399                          * in this case.
400                          */
401                         PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
402                                    "(avail %d needed %d)", avail, count);
403                         txq->stats.drop_total++;
404                         if (tso)
405                                 txq->stats.drop_tso++;
406                         rte_pktmbuf_free(txm);
407                         nb_tx++;
408                         continue;
409                 }
410
411                 /* Drop non-TSO packet that is excessively fragmented */
412                 if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
413                         PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
414                                    "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
415                         txq->stats.drop_too_many_segs++;
416                         txq->stats.drop_total++;
417                         rte_pktmbuf_free(txm);
418                         nb_tx++;
419                         continue;
420                 }
421
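                /*
                 * Frames that fit in a single segment of at most
                 * VMXNET3_HDR_COPY_SIZE bytes are copied into the Tx data
                 * ring; the descriptor filled below then points at the
                 * data-ring slot rather than the mbuf, which presumably
                 * spares the device a separate fetch for tiny packets.
                 */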
422                 if (txm->nb_segs == 1 &&
423                     rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
424                         struct Vmxnet3_TxDataDesc *tdd;
425
426                         tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
427                         copy_size = rte_pktmbuf_pkt_len(txm);
428                         rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
429                 }
430
431                 /* use the previous gen bit for the SOP desc */
432                 dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
433                 first2fill = txq->cmd_ring.next2fill;
434                 do {
435                         /* Remember the transmit buffer for cleanup */
436                         tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
437
438                         /* NB: the following assumes that the VMXNET3 maximum
439                          * transmit buffer size (16K) is greater than the
440                          * maximum mbuf segment size.
441                          */
442                         gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
443                         if (copy_size)
444                                 gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
445                                                                    txq->cmd_ring.next2fill *
446                                                                    sizeof(struct Vmxnet3_TxDataDesc));
447                         else
448                                 gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
449
450                         gdesc->dword[2] = dw2 | m_seg->data_len;
451                         gdesc->dword[3] = 0;
452
453                         /* move to the next2fill descriptor */
454                         vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
455
456                         /* use the right gen for non-SOP desc */
457                         dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
458                 } while ((m_seg = m_seg->next) != NULL);
459
460                 /* set the last buf_info for the pkt */
461                 tbi->m = txm;
462                 /* Update the EOP descriptor */
463                 gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
464
465                 /* Add VLAN tag if present */
466                 gdesc = txq->cmd_ring.base + first2fill;
467                 if (txm->ol_flags & PKT_TX_VLAN_PKT) {
468                         gdesc->txd.ti = 1;
469                         gdesc->txd.tci = txm->vlan_tci;
470                 }
471
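                /*
                 * Offload fields live in the SOP descriptor: for TSO, hlen
                 * covers the L2+L3+L4 headers and msscof carries the MSS;
                 * for plain L4 checksum offload, hlen covers L2+L3 and
                 * msscof holds the offset of the checksum field within the
                 * frame.
                 */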
472                 if (tso) {
473                         uint16_t mss = txm->tso_segsz;
474
475                         RTE_ASSERT(mss > 0);
476
477                         gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
478                         gdesc->txd.om = VMXNET3_OM_TSO;
479                         gdesc->txd.msscof = mss;
480
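                        /*
                         * Account for the number of segments the device will
                         * emit for this TSO frame: ceil(payload_len / mss).
                         */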
481                         deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
482                 } else if (txm->ol_flags & PKT_TX_L4_MASK) {
483                         gdesc->txd.om = VMXNET3_OM_CSUM;
484                         gdesc->txd.hlen = txm->l2_len + txm->l3_len;
485
486                         switch (txm->ol_flags & PKT_TX_L4_MASK) {
487                         case PKT_TX_TCP_CKSUM:
488                                 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
489                                 break;
490                         case PKT_TX_UDP_CKSUM:
491                                 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
492                                 break;
493                         default:
494                                 PMD_TX_LOG(WARNING, "requested cksum offload not supported %#" PRIx64,
495                                            txm->ol_flags & PKT_TX_L4_MASK);
496                                 abort();
497                         }
498                         deferred++;
499                 } else {
500                         gdesc->txd.hlen = 0;
501                         gdesc->txd.om = VMXNET3_OM_NONE;
502                         gdesc->txd.msscof = 0;
503                         deferred++;
504                 }
505
506                 /* flip the GEN bit on the SOP */
507                 rte_compiler_barrier();
508                 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
509
510                 txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
511                 nb_tx++;
512         }
513
514         PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));
515
516         if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
517                 txq_ctrl->txNumDeferred = 0;
518                 /* Notify vSwitch that packets are available. */
519                 VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
520                                        txq->cmd_ring.next2fill);
521         }
522
523         return nb_tx;
524 }
525
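/*
 * Hand one freshly allocated mbuf back to the device: record it in the
 * ring's buf_info, point the Rx descriptor at the mbuf's data address and
 * move the descriptor into the ring's current generation. Ring 0 is
 * refilled with HEAD buffers, ring 1 with BODY buffers.
 */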
526 static inline void
527 vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
528                    struct rte_mbuf *mbuf)
529 {
530         uint32_t val = 0;
531         struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
532         struct Vmxnet3_RxDesc *rxd =
533                 (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
534         vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
535
536         if (ring_id == 0)
537                 val = VMXNET3_RXD_BTYPE_HEAD;
538         else
539                 val = VMXNET3_RXD_BTYPE_BODY;
540
541         buf_info->m = mbuf;
542         buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
543         buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
544
545         rxd->addr = buf_info->bufPA;
546         rxd->btype = val;
547         rxd->len = buf_info->len;
548         rxd->gen = ring->gen;
549
550         vmxnet3_cmd_ring_adv_next2fill(ring);
551 }
552 /*
553  *  Allocate mbufs and post Rx descriptors with the buffer details so that
554  *  the device can receive packets into those buffers.
555  *  Ring layout:
556  *      Of the two rings, the 1st ring holds buffers of type 0 (HEAD) and
557  *      type 1 (BODY). bufs_per_pkt is set such that, for non-LRO cases, all
558  *      the buffers required by a frame fit in the 1st ring (the 1st buf of
559  *      type 0 and the rest of type 1). The 2nd ring holds type 1 buffers
560  *      only and is mostly used for LRO.
561  */
562 static int
563 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
564 {
565         int err = 0;
566         uint32_t i = 0, val = 0;
567         struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
568
569         if (ring_id == 0) {
570                 /* Usually: One HEAD type buf per packet
571                  * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
572                  * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
573                  */
574
575                 /* We use single packet buffer so all heads here */
576                 val = VMXNET3_RXD_BTYPE_HEAD;
577         } else {
578                 /* All BODY type buffers for 2nd ring */
579                 val = VMXNET3_RXD_BTYPE_BODY;
580         }
581
582         while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
583                 struct Vmxnet3_RxDesc *rxd;
584                 struct rte_mbuf *mbuf;
585                 vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
586
587                 rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
588
589                 /* Allocate blank mbuf for the current Rx Descriptor */
590                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
591                 if (unlikely(mbuf == NULL)) {
592                         PMD_RX_LOG(ERR, "Error allocating mbuf");
593                         rxq->stats.rx_buf_alloc_failure++;
594                         err = ENOMEM;
595                         break;
596                 }
597
598                 /*
599                  * Load the mbuf pointer into buf_info[next2fill]; this entry
600                  * plays the same role as the cookie in a virtio virtqueue.
601                  */
602                 buf_info->m = mbuf;
603                 buf_info->len = (uint16_t)(mbuf->buf_len -
604                                            RTE_PKTMBUF_HEADROOM);
605                 buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
606
607                 /* Load Rx Descriptor with the buffer's GPA */
608                 rxd->addr = buf_info->bufPA;
609
610                 /* After this point rxd->addr MUST not be NULL */
611                 rxd->btype = val;
612                 rxd->len = buf_info->len;
613                 /* Flip gen bit at the end to change ownership */
614                 rxd->gen = ring->gen;
615
616                 vmxnet3_cmd_ring_adv_next2fill(ring);
617                 i++;
618         }
619
620         /* Return error only if no buffers are posted at present */
621         if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
622                 return -err;
623         else
624                 return i;
625 }
626
627
628 /* Receive side checksum and other offloads */
629 static void
630 vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
631 {
632         /* Check for RSS */
633         if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
634                 rxm->ol_flags |= PKT_RX_RSS_HASH;
635                 rxm->hash.rss = rcd->rssHash;
636         }
637
638         /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
639         if (rcd->v4) {
640                 struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
641                 struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
642
643                 if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
644                         rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
645                 else
646                         rxm->packet_type = RTE_PTYPE_L3_IPV4;
647
648                 if (!rcd->cnc) {
649                         if (!rcd->ipc)
650                                 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
651
652                         if ((rcd->tcp || rcd->udp) && !rcd->tuc)
653                                 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
654                 }
655         }
656 }
657
658 /*
659  * Process the Rx Completion Ring of given vmxnet3_rx_queue
660  * for nb_pkts burst and return the number of packets received
661  */
662 uint16_t
663 vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
664 {
665         uint16_t nb_rx;
666         uint32_t nb_rxd, idx;
667         uint8_t ring_idx;
668         vmxnet3_rx_queue_t *rxq;
669         Vmxnet3_RxCompDesc *rcd;
670         vmxnet3_buf_info_t *rbi;
671         Vmxnet3_RxDesc *rxd;
672         struct rte_mbuf *rxm = NULL;
673         struct vmxnet3_hw *hw;
674
675         nb_rx = 0;
676         ring_idx = 0;
677         nb_rxd = 0;
678         idx = 0;
679
680         rxq = rx_queue;
681         hw = rxq->hw;
682
683         rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
684
685         if (unlikely(rxq->stopped)) {
686                 PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
687                 return 0;
688         }
689
690         while (rcd->gen == rxq->comp_ring.gen) {
691                 struct rte_mbuf *newm;
692
693                 if (nb_rx >= nb_pkts)
694                         break;
695
696                 newm = rte_mbuf_raw_alloc(rxq->mp);
697                 if (unlikely(newm == NULL)) {
698                         PMD_RX_LOG(ERR, "Error allocating mbuf");
699                         rxq->stats.rx_buf_alloc_failure++;
700                         break;
701                 }
702
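                /*
                 * The completion descriptor identifies which command ring
                 * and which index supplied the buffer: an rqID equal to
                 * qid1 maps to cmd_ring[0], anything else to cmd_ring[1].
                 */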
703                 idx = rcd->rxdIdx;
704                 ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
705                 rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
706                 RTE_SET_USED(rxd); /* used only for assert when enabled */
707                 rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
708
709                 PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
710
711                 RTE_ASSERT(rcd->len <= rxd->len);
712                 RTE_ASSERT(rbi->m);
713
714                 /* Get the packet buffer pointer from buf_info */
715                 rxm = rbi->m;
716
717                 /* Clear descriptor associated buf_info to be reused */
718                 rbi->m = NULL;
719                 rbi->bufPA = 0;
720
721                 /* Update the index that we received a packet */
722                 rxq->cmd_ring[ring_idx].next2comp = idx;
723
724                 /* For RCD with EOP set, check if there is frame error */
725                 if (unlikely(rcd->eop && rcd->err)) {
726                         rxq->stats.drop_total++;
727                         rxq->stats.drop_err++;
728
729                         if (!rcd->fcs) {
730                                 rxq->stats.drop_fcs++;
731                                 PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
732                         }
733                         PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
734                                    (int)(rcd - (struct Vmxnet3_RxCompDesc *)
735                                          rxq->comp_ring.base), rcd->rxdIdx);
736                         rte_pktmbuf_free_seg(rxm);
737                         goto rcd_done;
738                 }
739
740                 /* Initialize newly received packet buffer */
741                 rxm->port = rxq->port_id;
742                 rxm->nb_segs = 1;
743                 rxm->next = NULL;
744                 rxm->pkt_len = (uint16_t)rcd->len;
745                 rxm->data_len = (uint16_t)rcd->len;
746                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
747                 rxm->ol_flags = 0;
748                 rxm->vlan_tci = 0;
749
750                 /*
751                  * If this is the first buffer of the received packet,
752                  * set the pointer to the first mbuf of the packet
753                  * Otherwise, update the total length and the number of segments
754                  * of the current scattered packet, and update the pointer to
755                  * the last mbuf of the current packet.
756                  */
757                 if (rcd->sop) {
758                         RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
759
760                         if (unlikely(rcd->len == 0)) {
761                                 RTE_ASSERT(rcd->eop);
762
763                                 PMD_RX_LOG(DEBUG,
764                                            "Rx buf was skipped. rxring[%d][%d])",
765                                            ring_idx, idx);
766                                 rte_pktmbuf_free_seg(rxm);
767                                 goto rcd_done;
768                         }
769
770                         rxq->start_seg = rxm;
771                         vmxnet3_rx_offload(rcd, rxm);
772                 } else {
773                         struct rte_mbuf *start = rxq->start_seg;
774
775                         RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
776
777                         start->pkt_len += rxm->data_len;
778                         start->nb_segs++;
779
780                         rxq->last_seg->next = rxm;
781                 }
782                 rxq->last_seg = rxm;
783
784                 if (rcd->eop) {
785                         struct rte_mbuf *start = rxq->start_seg;
786
787                         /* Check for hardware stripped VLAN tag */
788                         if (rcd->ts) {
789                                 start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
790                                 start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
791                         }
792
793                         rx_pkts[nb_rx++] = start;
794                         rxq->start_seg = NULL;
795                 }
796
797 rcd_done:
798                 rxq->cmd_ring[ring_idx].next2comp = idx;
799                 VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
800                                           rxq->cmd_ring[ring_idx].size);
801
802                 /* It's time to renew descriptors */
803                 vmxnet3_renew_desc(rxq, ring_idx, newm);
804                 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
805                         VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
806                                                rxq->cmd_ring[ring_idx].next2fill);
807                 }
808
809                 /* Advance to the next descriptor in comp_ring */
810                 vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
811
812                 rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
813                 nb_rxd++;
814                 if (nb_rxd > rxq->cmd_ring[0].size) {
815                         PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
816                                    " relinquish control.");
817                         break;
818                 }
819         }
820
821         return nb_rx;
822 }
823
824 int
825 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
826                            uint16_t queue_idx,
827                            uint16_t nb_desc,
828                            unsigned int socket_id,
829                            const struct rte_eth_txconf *tx_conf)
830 {
831         struct vmxnet3_hw *hw = dev->data->dev_private;
832         const struct rte_memzone *mz;
833         struct vmxnet3_tx_queue *txq;
834         struct vmxnet3_cmd_ring *ring;
835         struct vmxnet3_comp_ring *comp_ring;
836         struct vmxnet3_data_ring *data_ring;
837         int size;
838
839         PMD_INIT_FUNC_TRACE();
840
841         if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
842             ETH_TXQ_FLAGS_NOXSUMSCTP) {
843                 PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
844                 return -EINVAL;
845         }
846
847         txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
848                           RTE_CACHE_LINE_SIZE);
849         if (txq == NULL) {
850                 PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
851                 return -ENOMEM;
852         }
853
854         txq->queue_id = queue_idx;
855         txq->port_id = dev->data->port_id;
856         txq->shared = &hw->tqd_start[queue_idx];
857         txq->hw = hw;
858         txq->qid = queue_idx;
859         txq->stopped = TRUE;
860
861         ring = &txq->cmd_ring;
862         comp_ring = &txq->comp_ring;
863         data_ring = &txq->data_ring;
864
865         /* Tx vmxnet ring length should be between 512-4096 */
866         if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
867                 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
868                              VMXNET3_DEF_TX_RING_SIZE);
869                 return -EINVAL;
870         } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
871                 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
872                              VMXNET3_TX_RING_MAX_SIZE);
873                 return -EINVAL;
874         } else {
875                 ring->size = nb_desc;
876                 ring->size &= ~VMXNET3_RING_SIZE_MASK;
877         }
878         comp_ring->size = data_ring->size = ring->size;
879
880         /* Tx vmxnet rings structure initialization */
881         ring->next2fill = 0;
882         ring->next2comp = 0;
883         ring->gen = VMXNET3_INIT_GEN;
884         comp_ring->next2proc = 0;
885         comp_ring->gen = VMXNET3_INIT_GEN;
886
887         size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
888         size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
889         size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
890
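        /*
         * One contiguous memzone backs all three rings, laid out as the
         * Tx command ring, then the completion ring, then the data ring;
         * the base pointers and physical addresses set below are offsets
         * into that zone.
         */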
891         mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
892                                       VMXNET3_RING_BA_ALIGN, socket_id);
893         if (mz == NULL) {
894                 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
895                 return -ENOMEM;
896         }
897         txq->mz = mz;
898         memset(mz->addr, 0, mz->len);
899
900         /* cmd_ring initialization */
901         ring->base = mz->addr;
902         ring->basePA = mz->phys_addr;
903
904         /* comp_ring initialization */
905         comp_ring->base = ring->base + ring->size;
906         comp_ring->basePA = ring->basePA +
907                 (sizeof(struct Vmxnet3_TxDesc) * ring->size);
908
909         /* data_ring initialization */
910         data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
911         data_ring->basePA = comp_ring->basePA +
912                         (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
913
914         /* cmd_ring0 buf_info allocation */
915         ring->buf_info = rte_zmalloc("tx_ring_buf_info",
916                                      ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
917         if (ring->buf_info == NULL) {
918                 PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
919                 return -ENOMEM;
920         }
921
922         /* Update the data portion with txq */
923         dev->data->tx_queues[queue_idx] = txq;
924
925         return 0;
926 }
927
928 int
929 vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
930                            uint16_t queue_idx,
931                            uint16_t nb_desc,
932                            unsigned int socket_id,
933                            __rte_unused const struct rte_eth_rxconf *rx_conf,
934                            struct rte_mempool *mp)
935 {
936         const struct rte_memzone *mz;
937         struct vmxnet3_rx_queue *rxq;
938         struct vmxnet3_hw *hw = dev->data->dev_private;
939         struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
940         struct vmxnet3_comp_ring *comp_ring;
941         int size;
942         uint8_t i;
943         char mem_name[32];
944
945         PMD_INIT_FUNC_TRACE();
946
947         rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
948                           RTE_CACHE_LINE_SIZE);
949         if (rxq == NULL) {
950                 PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
951                 return -ENOMEM;
952         }
953
954         rxq->mp = mp;
955         rxq->queue_id = queue_idx;
956         rxq->port_id = dev->data->port_id;
957         rxq->shared = &hw->rqd_start[queue_idx];
958         rxq->hw = hw;
959         rxq->qid1 = queue_idx;
960         rxq->qid2 = queue_idx + hw->num_rx_queues;
961         rxq->stopped = TRUE;
962
963         ring0 = &rxq->cmd_ring[0];
964         ring1 = &rxq->cmd_ring[1];
965         comp_ring = &rxq->comp_ring;
966
967         /* Rx vmxnet rings length should be between 256-4096 */
968         if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
969                 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
970                 return -EINVAL;
971         } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
972                 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
973                 return -EINVAL;
974         } else {
975                 ring0->size = nb_desc;
976                 ring0->size &= ~VMXNET3_RING_SIZE_MASK;
977                 ring1->size = ring0->size;
978         }
979
980         comp_ring->size = ring0->size + ring1->size;
981
982         /* Rx vmxnet rings structure initialization */
983         ring0->next2fill = 0;
984         ring1->next2fill = 0;
985         ring0->next2comp = 0;
986         ring1->next2comp = 0;
987         ring0->gen = VMXNET3_INIT_GEN;
988         ring1->gen = VMXNET3_INIT_GEN;
989         comp_ring->next2proc = 0;
990         comp_ring->gen = VMXNET3_INIT_GEN;
991
992         size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
993         size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
994
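        /*
         * A single memzone holds both Rx command rings followed by the
         * completion ring; the per-ring base pointers set below are
         * offsets into that zone.
         */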
995         mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
996                                       VMXNET3_RING_BA_ALIGN, socket_id);
997         if (mz == NULL) {
998                 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
999                 return -ENOMEM;
1000         }
1001         rxq->mz = mz;
1002         memset(mz->addr, 0, mz->len);
1003
1004         /* cmd_ring0 initialization */
1005         ring0->base = mz->addr;
1006         ring0->basePA = mz->phys_addr;
1007
1008         /* cmd_ring1 initialization */
1009         ring1->base = ring0->base + ring0->size;
1010         ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
1011
1012         /* comp_ring initialization */
1013         comp_ring->base = ring1->base + ring1->size;
1014         comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
1015                 ring1->size;
1016
1017         /* cmd_ring0-cmd_ring1 buf_info allocation */
1018         for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
1019
1020                 ring = &rxq->cmd_ring[i];
1021                 ring->rid = i;
1022                 snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
1023
1024                 ring->buf_info = rte_zmalloc(mem_name,
1025                                              ring->size * sizeof(vmxnet3_buf_info_t),
1026                                              RTE_CACHE_LINE_SIZE);
1027                 if (ring->buf_info == NULL) {
1028                         PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
1029                         return -ENOMEM;
1030                 }
1031         }
1032
1033         /* Update the data portion with rxq */
1034         dev->data->rx_queues[queue_idx] = rxq;
1035
1036         return 0;
1037 }
1038
1039 /*
1040  * Initializes Receive Unit
1041  * Load mbufs in rx queue in advance
1042  */
1043 int
1044 vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
1045 {
1046         struct vmxnet3_hw *hw = dev->data->dev_private;
1047
1048         int i, ret;
1049         uint8_t j;
1050
1051         PMD_INIT_FUNC_TRACE();
1052
1053         for (i = 0; i < hw->num_rx_queues; i++) {
1054                 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
1055
1056                 for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
1057                         /* Fill the whole ring: post as many Rx buffers as it can hold */
1058                         ret = vmxnet3_post_rx_bufs(rxq, j);
1059                         if (ret <= 0) {
1060                                 PMD_INIT_LOG(ERR,
1061                                              "ERROR: Posting Rxq: %d buffers ring: %d",
1062                                              i, j);
1063                                 return -ret;
1064                         }
1065                         /*
1066                          * Updating device with the index:next2fill to fill the
1067                          * mbufs for coming packets.
1068                          */
1069                         if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1070                                 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
1071                                                        rxq->cmd_ring[j].next2fill);
1072                         }
1073                 }
1074                 rxq->stopped = FALSE;
1075                 rxq->start_seg = NULL;
1076         }
1077
1078         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1079                 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1080
1081                 txq->stopped = FALSE;
1082         }
1083
1084         return 0;
1085 }
1086
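/*
 * Default 40-byte Toeplitz hash key used when the application does not
 * provide one; the name suggests it is the same default key used by the
 * Intel PMDs so that RSS spreading behaves consistently across drivers.
 */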
1087 static uint8_t rss_intel_key[40] = {
1088         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1089         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1090         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1091         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1092         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1093 };
1094
1095 /*
1096  * Configure RSS feature
1097  */
1098 int
1099 vmxnet3_rss_configure(struct rte_eth_dev *dev)
1100 {
1101         struct vmxnet3_hw *hw = dev->data->dev_private;
1102         struct VMXNET3_RSSConf *dev_rss_conf;
1103         struct rte_eth_rss_conf *port_rss_conf;
1104         uint64_t rss_hf;
1105         uint8_t i, j;
1106
1107         PMD_INIT_FUNC_TRACE();
1108
1109         dev_rss_conf = hw->rss_conf;
1110         port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1111
1112         /* loading hashFunc */
1113         dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
1114         /* loading hashKeySize */
1115         dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
1116         /* loading indTableSize: must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
1117         dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
1118
1119         if (port_rss_conf->rss_key == NULL) {
1120                 /* Default hash key */
1121                 port_rss_conf->rss_key = rss_intel_key;
1122         }
1123
1124         /* loading hashKey */
1125         memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
1126                dev_rss_conf->hashKeySize);
1127
1128         /* loading indTable: spread the Rx queues round-robin across the table */
1129         for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
1130                 if (j == dev->data->nb_rx_queues)
1131                         j = 0;
1132                 dev_rss_conf->indTable[i] = j;
1133         }
1134
1135         /* loading hashType */
1136         dev_rss_conf->hashType = 0;
1137         rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
1138         if (rss_hf & ETH_RSS_IPV4)
1139                 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
1140         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1141                 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
1142         if (rss_hf & ETH_RSS_IPV6)
1143                 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
1144         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1145                 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
1146
1147         return VMXNET3_SUCCESS;
1148 }