/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"

#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};

static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
        uint32_t avail = 0;

        if (rxq == NULL)
                return;

        PMD_RX_LOG(DEBUG,
                   "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
                   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
        PMD_RX_LOG(DEBUG,
                   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
                   (unsigned long)rxq->cmd_ring[0].basePA,
                   (unsigned long)rxq->cmd_ring[1].basePA,
                   (unsigned long)rxq->comp_ring.basePA);

        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
        PMD_RX_LOG(DEBUG,
                   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[0].size, avail,
                   rxq->comp_ring.next2proc,
                   rxq->cmd_ring[0].size - avail);

        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
        PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
                   rxq->cmd_ring[1].size - avail);

}

static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
        uint32_t avail = 0;

        if (txq == NULL)
                return;

        PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
                   txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
        PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
                   (unsigned long)txq->cmd_ring.basePA,
                   (unsigned long)txq->comp_ring.basePA,
                   (unsigned long)txq->data_ring.basePA);

        avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
        PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)txq->cmd_ring.size, avail,
                   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
#endif

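/*
 * Free any mbufs still attached to the Tx command ring, walking from
 * next2comp up to next2fill. Only called once the device is quiesced,
 * so descriptor ownership does not need to be checked.
 */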
static void
vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
        while (ring->next2comp != ring->next2fill) {
                /* No need to worry about desc ownership, device is quiesced by now. */
                vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

                if (buf_info->m) {
                        rte_pktmbuf_free(buf_info->m);
                        buf_info->m = NULL;
                        buf_info->bufPA = 0;
                        buf_info->len = 0;
                }
                vmxnet3_cmd_ring_adv_next2comp(ring);
        }
}

static void
vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
        uint32_t i;

        for (i = 0; i < ring->size; i++) {
                /* No need to worry about desc ownership, device is quiesced by now. */
                vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];

                if (buf_info->m) {
                        rte_pktmbuf_free_seg(buf_info->m);
                        buf_info->m = NULL;
                        buf_info->bufPA = 0;
                        buf_info->len = 0;
                }
                vmxnet3_cmd_ring_adv_next2comp(ring);
        }
}

static void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
        rte_free(ring->buf_info);
        ring->buf_info = NULL;
}

void
vmxnet3_dev_tx_queue_release(void *txq)
{
        vmxnet3_tx_queue_t *tq = txq;

        if (tq != NULL) {
                /* Release mbufs */
                vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
                /* Release the cmd_ring */
                vmxnet3_cmd_ring_release(&tq->cmd_ring);
        }
}

void
vmxnet3_dev_rx_queue_release(void *rxq)
{
        int i;
        vmxnet3_rx_queue_t *rq = rxq;

        if (rq != NULL) {
                /* Release mbufs */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

                /* Release both the cmd_rings */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
        }
}

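/*
 * Reset a Tx queue to its initial state: release pending mbufs, rewind
 * the ring indices, restore the generation bits and zero the descriptor
 * memory shared by the command, completion and data rings.
 */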
static void
vmxnet3_dev_tx_queue_reset(void *txq)
{
        vmxnet3_tx_queue_t *tq = txq;
        struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
        struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
        struct vmxnet3_data_ring *data_ring = &tq->data_ring;
        int size;

        if (tq != NULL) {
                /* Release the cmd_ring mbufs */
                vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
        }

        /* Tx vmxnet rings structure initialization */
        ring->next2fill = 0;
        ring->next2comp = 0;
        ring->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;

        memset(ring->base, 0, size);
}

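/*
 * Reset an Rx queue: release the mbufs posted on both command rings,
 * rewind the ring indices and generation bits, and zero the descriptor
 * memory shared by the two command rings and the completion ring.
 */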
static void
vmxnet3_dev_rx_queue_reset(void *rxq)
{
        int i;
        vmxnet3_rx_queue_t *rq = rxq;
        struct vmxnet3_cmd_ring *ring0, *ring1;
        struct vmxnet3_comp_ring *comp_ring;
        int size;

        if (rq != NULL) {
                /* Release both the cmd_rings mbufs */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
        }

        ring0 = &rq->cmd_ring[0];
        ring1 = &rq->cmd_ring[1];
        comp_ring = &rq->comp_ring;

        /* Rx vmxnet rings structure initialization */
        ring0->next2fill = 0;
        ring1->next2fill = 0;
        ring0->next2comp = 0;
        ring1->next2comp = 0;
        ring0->gen = VMXNET3_INIT_GEN;
        ring1->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
        size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;

        memset(ring0->base, 0, size);
}

void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
        unsigned i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

                if (txq != NULL) {
                        txq->stopped = TRUE;
                        vmxnet3_dev_tx_queue_reset(txq);
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

                if (rxq != NULL) {
                        rxq->stopped = TRUE;
                        vmxnet3_dev_rx_queue_reset(rxq);
                }
        }
}

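/*
 * Handle one Tx completion. A completion is reported only for descriptors
 * with the CQ bit set, which this driver sets on the EOP descriptor of
 * each packet; free the mbuf chain recorded there and advance next2comp
 * over every descriptor the packet consumed.
 * Returns the number of command ring descriptors released.
 */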
static int
vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
{
        int completed = 0;
        struct rte_mbuf *mbuf;

        /* Release cmd_ring descriptor and free mbuf */
        RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);

        mbuf = txq->cmd_ring.buf_info[eop_idx].m;
        if (mbuf == NULL)
                rte_panic("EOP desc does not point to a valid mbuf");
        rte_pktmbuf_free(mbuf);

        txq->cmd_ring.buf_info[eop_idx].m = NULL;

        while (txq->cmd_ring.next2comp != eop_idx) {
                /* no out-of-order completion */
                RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
                vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
                completed++;
        }

        /* Mark the txd for which tcd was generated as completed */
        vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

        return completed + 1;
}

static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
        int completed = 0;
        vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
        struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
                (comp_ring->base + comp_ring->next2proc);

        while (tcd->gen == comp_ring->gen) {
                completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

                vmxnet3_comp_ring_adv_next2proc(comp_ring);
                tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
                                                    comp_ring->next2proc);
        }

        PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}

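/*
 * Transmit burst function. For each packet: check ring space, copy small
 * single-segment frames into the data ring, fill one Tx descriptor per
 * mbuf segment, then apply VLAN, TSO or L4 checksum offload fields on the
 * SOP descriptor and flip its generation bit to hand the packet to the
 * device. The TXPROD doorbell is written only once txNumDeferred reaches
 * the txThreshold value from the shared queue control area.
 */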
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
{
        uint16_t nb_tx;
        vmxnet3_tx_queue_t *txq = tx_queue;
        struct vmxnet3_hw *hw = txq->hw;
        Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
        uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);

        if (unlikely(txq->stopped)) {
                PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
                return 0;
        }

        /* Free up the comp_descriptors aggressively */
        vmxnet3_tq_tx_complete(txq);

        nb_tx = 0;
        while (nb_tx < nb_pkts) {
                Vmxnet3_GenericDesc *gdesc;
                vmxnet3_buf_info_t *tbi;
                uint32_t first2fill, avail, dw2;
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                struct rte_mbuf *m_seg = txm;
                int copy_size = 0;
                bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
                /* # of descriptors needed for a packet. */
                unsigned count = txm->nb_segs;

                avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
                if (count > avail) {
                        /* Is command ring full? */
                        if (unlikely(avail == 0)) {
                                PMD_TX_LOG(DEBUG, "No free ring descriptors");
                                txq->stats.tx_ring_full++;
                                txq->stats.drop_total += (nb_pkts - nb_tx);
                                break;
                        }

                        /* Command ring is not full but cannot handle the
                         * multi-segmented packet. Let's try the next packet
                         * in this case.
                         */
                        PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
                                   "(avail %d needed %d)", avail, count);
                        txq->stats.drop_total++;
                        if (tso)
                                txq->stats.drop_tso++;
                        rte_pktmbuf_free(txm);
                        nb_tx++;
                        continue;
                }

                /* Drop non-TSO packet that is excessively fragmented */
                if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
                        PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
                                   "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
                        txq->stats.drop_too_many_segs++;
                        txq->stats.drop_total++;
                        rte_pktmbuf_free(txm);
                        nb_tx++;
                        continue;
                }

                if (txm->nb_segs == 1 &&
                    rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
                        struct Vmxnet3_TxDataDesc *tdd;

                        tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
                        copy_size = rte_pktmbuf_pkt_len(txm);
                        rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
                }

                /* use the previous gen bit for the SOP desc */
                dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
                first2fill = txq->cmd_ring.next2fill;
                do {
                        /* Remember the transmit buffer for cleanup */
                        tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;

                        /* NB: the following assumes that the VMXNET3
                         * maximum transmit buffer size (16K) is greater
                         * than the maximum mbuf segment size.
                         */
                        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
                        if (copy_size)
                                gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
                                                                   txq->cmd_ring.next2fill *
                                                                   sizeof(struct Vmxnet3_TxDataDesc));
                        else
                                gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);

                        gdesc->dword[2] = dw2 | m_seg->data_len;
                        gdesc->dword[3] = 0;

                        /* move to the next2fill descriptor */
                        vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

                        /* use the right gen for non-SOP desc */
                        dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
                } while ((m_seg = m_seg->next) != NULL);

                /* set the last buf_info for the pkt */
                tbi->m = txm;
                /* Update the EOP descriptor */
                gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

                /* Add VLAN tag if present */
                gdesc = txq->cmd_ring.base + first2fill;
                if (txm->ol_flags & PKT_TX_VLAN_PKT) {
                        gdesc->txd.ti = 1;
                        gdesc->txd.tci = txm->vlan_tci;
                }

                if (tso) {
                        uint16_t mss = txm->tso_segsz;

                        RTE_ASSERT(mss > 0);

                        gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
                        gdesc->txd.om = VMXNET3_OM_TSO;
                        gdesc->txd.msscof = mss;

                        deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
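                        /* i.e. the number of TSO segments this packet
                         * produces: ceil((pkt_len - hlen) / mss).
                         */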
                } else if (txm->ol_flags & PKT_TX_L4_MASK) {
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.hlen = txm->l2_len + txm->l3_len;

                        switch (txm->ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
                                break;
                        case PKT_TX_UDP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
                                break;
                        default:
                                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
                                           txm->ol_flags & PKT_TX_L4_MASK);
                                abort();
                        }
                        deferred++;
                } else {
                        gdesc->txd.hlen = 0;
                        gdesc->txd.om = VMXNET3_OM_NONE;
                        gdesc->txd.msscof = 0;
                        deferred++;
                }

                /* flip the GEN bit on the SOP */
                rte_compiler_barrier();
                gdesc->dword[2] ^= VMXNET3_TXD_GEN;

                txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
                nb_tx++;
        }

        PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));

        if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
                txq_ctrl->txNumDeferred = 0;
                /* Notify vSwitch that packets are available. */
                VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
                                       txq->cmd_ring.next2fill);
        }

        return nb_tx;
}

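/*
 * Refill one Rx descriptor with a fresh mbuf: record the buffer in
 * buf_info, program the descriptor with the buffer address, length and
 * type (HEAD for ring 0, BODY for ring 1), and hand it to the device by
 * setting its generation bit.
 */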
static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
                   struct rte_mbuf *mbuf)
{
        uint32_t val = 0;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
        struct Vmxnet3_RxDesc *rxd =
                (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
        vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

        if (ring_id == 0)
                val = VMXNET3_RXD_BTYPE_HEAD;
        else
                val = VMXNET3_RXD_BTYPE_BODY;

        buf_info->m = mbuf;
        buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
        buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

        rxd->addr = buf_info->bufPA;
        rxd->btype = val;
        rxd->len = buf_info->len;
        rxd->gen = ring->gen;

        vmxnet3_cmd_ring_adv_next2fill(ring);
}
/*
 *  Allocate mbufs and post Rx descriptors with the buffer details so that
 *  the device can receive packets into those buffers.
 *  Ring layout:
 *      Of the two rings, the 1st ring holds buffers of type 0 and type 1.
 *      bufs_per_pkt is set such that in the non-LRO case all the buffers
 *      required by a frame fit in the 1st ring (the 1st buf of type 0, the
 *      rest of type 1). The 2nd ring holds type 1 buffers only and is used
 *      mostly for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
        int err = 0;
        uint32_t i = 0, val = 0;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

        if (ring_id == 0) {
                /* Usually: One HEAD type buf per packet
                 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
                 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
                 */

                /* We use single packet buffer so all heads here */
                val = VMXNET3_RXD_BTYPE_HEAD;
        } else {
                /* All BODY type buffers for 2nd ring */
                val = VMXNET3_RXD_BTYPE_BODY;
        }

        while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
                struct Vmxnet3_RxDesc *rxd;
                struct rte_mbuf *mbuf;
                vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

                rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);

                /* Allocate blank mbuf for the current Rx Descriptor */
                mbuf = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(mbuf == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf");
                        rxq->stats.rx_buf_alloc_failure++;
                        err = ENOMEM;
                        break;
                }

                /*
                 * Load the mbuf pointer into this descriptor's buf_info entry;
                 * buf_info plays the same role as the cookie in a virtio
                 * virtqueue.
                 */
                buf_info->m = mbuf;
                buf_info->len = (uint16_t)(mbuf->buf_len -
                                           RTE_PKTMBUF_HEADROOM);
                buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

                /* Load Rx Descriptor with the buffer's GPA */
                rxd->addr = buf_info->bufPA;

                /* After this point rxd->addr MUST not be NULL */
                rxd->btype = val;
                rxd->len = buf_info->len;
                /* Flip gen bit at the end to change ownership */
                rxd->gen = ring->gen;

                vmxnet3_cmd_ring_adv_next2fill(ring);
                i++;
        }

        /* Return error only if no buffers are posted at present */
        if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
                return -err;
        else
                return i;
}


/* Receive side checksum and other offloads */
static void
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
{
        /* Check for RSS */
        if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
                rxm->ol_flags |= PKT_RX_RSS_HASH;
                rxm->hash.rss = rcd->rssHash;
        }

        /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
        if (rcd->v4) {
                struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
                struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);

                if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
                        rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
                else
                        rxm->packet_type = RTE_PTYPE_L3_IPV4;

                if (!rcd->cnc) {
                        if (!rcd->ipc)
                                rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;

                        if ((rcd->tcp || rcd->udp) && !rcd->tuc)
                                rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                }
        }
}

/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        uint16_t nb_rx;
        uint32_t nb_rxd, idx;
        uint8_t ring_idx;
        vmxnet3_rx_queue_t *rxq;
        Vmxnet3_RxCompDesc *rcd;
        vmxnet3_buf_info_t *rbi;
        Vmxnet3_RxDesc *rxd;
        struct rte_mbuf *rxm = NULL;
        struct vmxnet3_hw *hw;

        nb_rx = 0;
        ring_idx = 0;
        nb_rxd = 0;
        idx = 0;

        rxq = rx_queue;
        hw = rxq->hw;

        rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

        if (unlikely(rxq->stopped)) {
                PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
                return 0;
        }

        while (rcd->gen == rxq->comp_ring.gen) {
                struct rte_mbuf *newm;

                if (nb_rx >= nb_pkts)
                        break;

                newm = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(newm == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf");
                        rxq->stats.rx_buf_alloc_failure++;
                        break;
                }

                idx = rcd->rxdIdx;
                ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
                rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
                RTE_SET_USED(rxd); /* used only for assert when enabled */
                rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

                PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

                RTE_ASSERT(rcd->len <= rxd->len);
                RTE_ASSERT(rbi->m);

                /* Get the packet buffer pointer from buf_info */
                rxm = rbi->m;

                /* Clear descriptor associated buf_info to be reused */
                rbi->m = NULL;
                rbi->bufPA = 0;

                /* Update the index that we received a packet */
                rxq->cmd_ring[ring_idx].next2comp = idx;

                /* For RCD with EOP set, check if there is frame error */
                if (unlikely(rcd->eop && rcd->err)) {
                        rxq->stats.drop_total++;
                        rxq->stats.drop_err++;

                        if (!rcd->fcs) {
                                rxq->stats.drop_fcs++;
                                PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
                        }
                        PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
                                   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                                         rxq->comp_ring.base), rcd->rxdIdx);
                        rte_pktmbuf_free_seg(rxm);
                        goto rcd_done;
                }

                /* Initialize newly received packet buffer */
                rxm->port = rxq->port_id;
                rxm->nb_segs = 1;
                rxm->next = NULL;
                rxm->pkt_len = (uint16_t)rcd->len;
                rxm->data_len = (uint16_t)rcd->len;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;

                /*
                 * If this is the first buffer of the received packet,
                 * set the pointer to the first mbuf of the packet
                 * Otherwise, update the total length and the number of segments
                 * of the current scattered packet, and update the pointer to
                 * the last mbuf of the current packet.
                 */
                if (rcd->sop) {
                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

                        if (unlikely(rcd->len == 0)) {
                                RTE_ASSERT(rcd->eop);

                                PMD_RX_LOG(DEBUG,
                                           "Rx buf was skipped. rxring[%d][%d])",
                                           ring_idx, idx);
                                rte_pktmbuf_free_seg(rxm);
                                goto rcd_done;
                        }

                        rxq->start_seg = rxm;
                        vmxnet3_rx_offload(rcd, rxm);
                } else {
                        struct rte_mbuf *start = rxq->start_seg;

                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);

                        start->pkt_len += rxm->data_len;
                        start->nb_segs++;

                        rxq->last_seg->next = rxm;
                }
                rxq->last_seg = rxm;

                if (rcd->eop) {
                        struct rte_mbuf *start = rxq->start_seg;

                        /* Check for hardware stripped VLAN tag */
                        if (rcd->ts) {
                                start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
                                start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
                        }

                        rx_pkts[nb_rx++] = start;
                        rxq->start_seg = NULL;
                }

rcd_done:
                rxq->cmd_ring[ring_idx].next2comp = idx;
                VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
                                          rxq->cmd_ring[ring_idx].size);

                /* It's time to renew descriptors */
                vmxnet3_renew_desc(rxq, ring_idx, newm);
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);
                }

                /* Advance to the next descriptor in comp_ring */
                vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

                rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
                nb_rxd++;
                if (nb_rxd > rxq->cmd_ring[0].size) {
                        PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
                                   " relinquish control.");
                        break;
                }
        }

        return nb_rx;
}

/*
 * Create memzone for device rings. malloc can't be used as the physical address is
 * needed. If the memzone is already created, then this function returns a ptr
 * to the old one.
 */
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                 dev->driver->pci_drv.driver.name, ring_name,
                 dev->data->port_id, queue_id);

        mz = rte_memzone_lookup(z_name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(z_name, ring_size,
                                           socket_id, 0, VMXNET3_RING_BA_ALIGN);
}

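/*
 * Set up a Tx queue: validate the requested descriptor count, allocate
 * the queue structure and buf_info array, and carve the command,
 * completion and data rings out of a single physically contiguous
 * memzone.
 */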
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           __rte_unused const struct rte_eth_txconf *tx_conf)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
        struct vmxnet3_tx_queue *txq;
        struct vmxnet3_cmd_ring *ring;
        struct vmxnet3_comp_ring *comp_ring;
        struct vmxnet3_data_ring *data_ring;
        int size;

        PMD_INIT_FUNC_TRACE();

        if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
            ETH_TXQ_FLAGS_NOXSUMSCTP) {
                PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
                return -EINVAL;
        }

        txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
                return -ENOMEM;
        }

        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->shared = &hw->tqd_start[queue_idx];
        txq->hw = hw;
        txq->qid = queue_idx;
        txq->stopped = TRUE;

        ring = &txq->cmd_ring;
        comp_ring = &txq->comp_ring;
        data_ring = &txq->data_ring;

        /* Tx vmxnet ring length should be between 512-4096 */
        if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
                             VMXNET3_DEF_TX_RING_SIZE);
                return -EINVAL;
        } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
                             VMXNET3_TX_RING_MAX_SIZE);
                return -EINVAL;
        } else {
                ring->size = nb_desc;
                ring->size &= ~VMXNET3_RING_SIZE_MASK;
        }
        comp_ring->size = data_ring->size = ring->size;

        /* Tx vmxnet rings structure initialization */
        ring->next2fill = 0;
        ring->next2comp = 0;
        ring->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;

        mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);

        /* cmd_ring initialization */
        ring->base = mz->addr;
        ring->basePA = mz->phys_addr;

        /* comp_ring initialization */
        comp_ring->base = ring->base + ring->size;
        comp_ring->basePA = ring->basePA +
                (sizeof(struct Vmxnet3_TxDesc) * ring->size);

        /* data_ring initialization */
        data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
        data_ring->basePA = comp_ring->basePA +
                        (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);

        /* cmd_ring0 buf_info allocation */
        ring->buf_info = rte_zmalloc("tx_ring_buf_info",
                                     ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
        if (ring->buf_info == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
                return -ENOMEM;
        }

        /* Update the data portion with txq */
        dev->data->tx_queues[queue_idx] = txq;

        return 0;
}

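/*
 * Set up an Rx queue: allocate the queue structure, size both command
 * rings identically and the completion ring to their sum, and lay all
 * three out back to back in one physically contiguous memzone.
 */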
int
vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           __rte_unused const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)
{
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
        struct vmxnet3_hw *hw = dev->data->dev_private;
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
        uint8_t i;
        char mem_name[32];

        PMD_INIT_FUNC_TRACE();

        rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
                return -ENOMEM;
        }

        rxq->mp = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->shared = &hw->rqd_start[queue_idx];
        rxq->hw = hw;
        rxq->qid1 = queue_idx;
        rxq->qid2 = queue_idx + hw->num_rx_queues;
        rxq->stopped = TRUE;

        ring0 = &rxq->cmd_ring[0];
        ring1 = &rxq->cmd_ring[1];
        comp_ring = &rxq->comp_ring;

        /* Rx vmxnet rings length should be between 256-4096 */
        if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
                return -EINVAL;
        } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
                return -EINVAL;
        } else {
                ring0->size = nb_desc;
                ring0->size &= ~VMXNET3_RING_SIZE_MASK;
                ring1->size = ring0->size;
        }

        comp_ring->size = ring0->size + ring1->size;

        /* Rx vmxnet rings structure initialization */
        ring0->next2fill = 0;
        ring1->next2fill = 0;
        ring0->next2comp = 0;
        ring1->next2comp = 0;
        ring0->gen = VMXNET3_INIT_GEN;
        ring1->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
        size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;

        mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);

        /* cmd_ring0 initialization */
        ring0->base = mz->addr;
        ring0->basePA = mz->phys_addr;

        /* cmd_ring1 initialization */
        ring1->base = ring0->base + ring0->size;
        ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;

        /* comp_ring initialization */
        comp_ring->base = ring1->base + ring1->size;
        comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
                ring1->size;

        /* cmd_ring0-cmd_ring1 buf_info allocation */
        for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {

                ring = &rxq->cmd_ring[i];
                ring->rid = i;
                snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);

                ring->buf_info = rte_zmalloc(mem_name,
                                             ring->size * sizeof(vmxnet3_buf_info_t),
                                             RTE_CACHE_LINE_SIZE);
                if (ring->buf_info == NULL) {
                        PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
                        return -ENOMEM;
                }
        }

        /* Update the data portion with rxq */
        dev->data->rx_queues[queue_idx] = rxq;

        return 0;
}

/*
 * Initializes Receive Unit
 * Load mbufs in rx queue in advance
 */
int
vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;

        int i, ret;
        uint8_t j;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < hw->num_rx_queues; i++) {
                vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

                for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
                        /* Fill all free descriptors of this ring with mbufs */
                        ret = vmxnet3_post_rx_bufs(rxq, j);
                        if (ret <= 0) {
                                PMD_INIT_LOG(ERR,
                                             "ERROR: Posting Rxq: %d buffers ring: %d",
                                             i, j);
                                return -ret;
                        }
                        /*
                         * Updating device with the index:next2fill to fill the
                         * mbufs for coming packets.
                         */
                        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                                       rxq->cmd_ring[j].next2fill);
                        }
                }
                rxq->stopped = FALSE;
                rxq->start_seg = NULL;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

                txq->stopped = FALSE;
        }

        return 0;
}

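/* Default Toeplitz hash key, used when the application does not provide one */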
static uint8_t rss_intel_key[40] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

/*
 * Configure RSS feature
 */
int
vmxnet3_rss_configure(struct rte_eth_dev *dev)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;
        struct VMXNET3_RSSConf *dev_rss_conf;
        struct rte_eth_rss_conf *port_rss_conf;
        uint64_t rss_hf;
        uint8_t i, j;

        PMD_INIT_FUNC_TRACE();

        dev_rss_conf = hw->rss_conf;
        port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;

        /* loading hashFunc */
        dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
        /* loading hashKeySize */
        dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
        /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
        dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);

        if (port_rss_conf->rss_key == NULL) {
                /* Default hash key */
                port_rss_conf->rss_key = rss_intel_key;
        }

        /* loading hashKey */
        memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
               dev_rss_conf->hashKeySize);

        /* loading indTable */
        for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                dev_rss_conf->indTable[i] = j;
        }

        /* loading hashType */
        dev_rss_conf->hashType = 0;
        rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
        if (rss_hf & ETH_RSS_IPV4)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
        if (rss_hf & ETH_RSS_IPV6)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
        if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;

        return VMXNET3_SUCCESS;
}