/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
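/* Worked example of the two macros above: CHARS_PER_UINT32 =
 * sizeof(uint32_t) = 4, so BIT_MASK_PER_UINT32 = (1 << 4) - 1 = 0xF,
 * i.e. one mask bit per byte of a uint32_t.
 */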

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
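/* Usage sketch (illustrative only): with the default 1:1 map,
 * Q2V(pdev, 2) yields the interrupt vector assigned to queue 2,
 * i.e. pdev->intr_handle.intr_vec[2].
 */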

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE

int fm10k_logtype_init;
int fm10k_logtype_driver;

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
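/* Worked count: FM10K_NB_HW_XSTATS = 8 global counters above, plus
 * (3 RX + 2 TX) per-queue counters for each PF queue. Assuming
 * FM10K_MAX_QUEUES_PF is 128, FM10K_NB_XSTATS = 8 + 128 * 5 = 648.
 */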
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
                           __rte_unused struct rte_mbuf **tx_pkts,
                           __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
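/* Worked examples: fls(0) = 0, fls(1) = 1, fls(8) = 4 and
 * fls(0x80000000) = 32. For n a power of two, fls(n - 1) therefore
 * gives the number of bits needed to index n entries.
 */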

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
                dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
                return;
        }

        /* random key is rss_intel_key (default) or user provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
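        /* Worked example (assuming 4 RX queues): entries 0..3 pack into
         * reta = 0x00010203; rte_bswap32() turns this into 0x03020100,
         * so RSS index 0 lands in the least significant byte of
         * FM10K_RETA(0, 0).
         */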
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can
                 * change to another mode via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }
                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
                 * register is read-only for VF.
                 */
                if (fm10k_check_ftag(dev->device->devargs)) {
                        if (hw->mac.type == fm10k_mac_pf) {
                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
                        } else {
                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
                                return -ENOTSUP;
                        }
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

                /* assign default SGLORT for each TX queue by PF */
                if (hw->mac.type == fm10k_mac_pf)
                        FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint32_t logic_port = hw->mac.dglort_map;
        uint16_t buf_size;
        uint16_t queue_stride = 0;

        /* enable RXINT for interrupt mode */
        i = 0;
        if (rte_intr_dp_is_en(intr_handle)) {
                for (; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                }
        }
        /* Disable other RXINT to avoid possible interrupt */
        for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                        3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buff without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data into the next mbuf.
                 */
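                /* Worked example (assuming the common 2176B mbuf data
                 * room): 2176 - 128 (RTE_PKTMBUF_HEADROOM) = 2048, and
                 * subtracting the 512B alignment reserve below leaves
                 * 1536B of usable buffer per mbuf.
                 */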
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);

                /* Account for dual VLAN tag length to support dual VLAN */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        /* update RX_SGLORT for loopback suppression */
        if (hw->mac.type != fm10k_mac_pf)
                return 0;
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        if (macvlan->nb_queue_pools)
                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                if (i && queue_stride && !(i % queue_stride))
                        logic_port++;
                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
        }

        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k can only select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
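        /* Worked example (hypothetical configuration): with 8 VMDQ pools
         * over 32 RX queues, pool_len = fls(7) = 3 and
         * rss_len = fls(31) - 3 = 2, i.e. 2^2 = 4 RSS queues per pool.
         */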
1039
1040         /* GLORT 0x0-0x3F are used by PF and VMDQ,  0x40-0x7F used by FD */
1041         dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1042         dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1043                         hw->mac.dglort_map;
1044         FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1045         /* Configure VMDQ/RSS DGlort Decoder */
1046         FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1047
1048         /* Flow Director configurations, only queue number is valid. */
1049         dglortdec = fls(dev->data->nb_rx_queues - 1);
1050         dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1051                         (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1052         FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1053         FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1054
1055         /* Invalidate all other GLORT entries */
1056         for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1057                 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1058                                 FM10K_DGLORTMAP_NONE);
1059 }
1060
1061 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1062 static int
1063 fm10k_dev_start(struct rte_eth_dev *dev)
1064 {
1065         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1066         int i, diag;
1067
1068         PMD_INIT_FUNC_TRACE();
1069
1070         /* stop, init, then start the hw */
1071         diag = fm10k_stop_hw(hw);
1072         if (diag != FM10K_SUCCESS) {
1073                 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1074                 return -EIO;
1075         }
1076
1077         diag = fm10k_init_hw(hw);
1078         if (diag != FM10K_SUCCESS) {
1079                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1080                 return -EIO;
1081         }
1082
1083         diag = fm10k_start_hw(hw);
1084         if (diag != FM10K_SUCCESS) {
1085                 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1086                 return -EIO;
1087         }
1088
1089         diag = fm10k_dev_tx_init(dev);
1090         if (diag) {
1091                 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1092                 return diag;
1093         }
1094
1095         if (fm10k_dev_rxq_interrupt_setup(dev))
1096                 return -EIO;
1097
1098         diag = fm10k_dev_rx_init(dev);
1099         if (diag) {
1100                 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1101                 return diag;
1102         }
1103
1104         if (hw->mac.type == fm10k_mac_pf)
1105                 fm10k_dev_dglort_map_configure(dev);
1106
1107         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1108                 struct fm10k_rx_queue *rxq;
1109                 rxq = dev->data->rx_queues[i];
1110
1111                 if (rxq->rx_deferred_start)
1112                         continue;
1113                 diag = fm10k_dev_rx_queue_start(dev, i);
1114                 if (diag != 0) {
1115                         int j;
1116                         for (j = 0; j < i; ++j)
1117                                 rx_queue_clean(dev->data->rx_queues[j]);
1118                         return diag;
1119                 }
1120         }
1121
1122         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1123                 struct fm10k_tx_queue *txq;
1124                 txq = dev->data->tx_queues[i];
1125
1126                 if (txq->tx_deferred_start)
1127                         continue;
1128                 diag = fm10k_dev_tx_queue_start(dev, i);
1129                 if (diag != 0) {
1130                         int j;
1131                         for (j = 0; j < i; ++j)
1132                                 tx_queue_clean(dev->data->tx_queues[j]);
1133                         for (j = 0; j < dev->data->nb_rx_queues; ++j)
1134                                 rx_queue_clean(dev->data->rx_queues[j]);
1135                         return diag;
1136                 }
1137         }
1138
1139         /* Update default vlan when not in VMDQ mode */
1140         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1141                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1142
1143         fm10k_link_update(dev, 0);
1144
1145         return 0;
1146 }
1147
1148 static void
1149 fm10k_dev_stop(struct rte_eth_dev *dev)
1150 {
1151         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1152         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1153         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1154         int i;
1155
1156         PMD_INIT_FUNC_TRACE();
1157
1158         if (dev->data->tx_queues)
1159                 for (i = 0; i < dev->data->nb_tx_queues; i++)
1160                         fm10k_dev_tx_queue_stop(dev, i);
1161
1162         if (dev->data->rx_queues)
1163                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1164                         fm10k_dev_rx_queue_stop(dev, i);
1165
1166         /* Disable datapath event */
1167         if (rte_intr_dp_is_en(intr_handle)) {
1168                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1169                         FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1170                                 3 << FM10K_RXINT_TIMER_SHIFT);
1171                         if (hw->mac.type == fm10k_mac_pf)
1172                                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1173                                         FM10K_ITR_MASK_SET);
1174                         else
1175                                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1176                                         FM10K_ITR_MASK_SET);
1177                 }
1178         }
1179         /* Clean datapath event and queue/vec mapping */
1180         rte_intr_efd_disable(intr_handle);
1181         rte_free(intr_handle->intr_vec);
1182         intr_handle->intr_vec = NULL;
1183 }
1184
1185 static void
1186 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1187 {
1188         int i;
1189
1190         PMD_INIT_FUNC_TRACE();
1191
1192         if (dev->data->tx_queues) {
1193                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1194                         struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1195
1196                         tx_queue_free(txq);
1197                 }
1198         }
1199
1200         if (dev->data->rx_queues) {
1201                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1202                         fm10k_rx_queue_release(dev->data->rx_queues[i]);
1203         }
1204 }
1205
1206 static void
1207 fm10k_dev_close(struct rte_eth_dev *dev)
1208 {
1209         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1210
1211         PMD_INIT_FUNC_TRACE();
1212
1213         fm10k_mbx_lock(hw);
1214         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1215                 MAX_LPORT_NUM, false);
1216         fm10k_mbx_unlock(hw);
1217
1218         /* allow 100ms for device to quiesce */
1219         rte_delay_us(FM10K_SWITCH_QUIESCE_US);
1220
1221         /* Stop mailbox service first */
1222         fm10k_close_mbx_service(hw);
1223         fm10k_dev_stop(dev);
1224         fm10k_dev_queue_release(dev);
1225         fm10k_stop_hw(hw);
1226 }
1227
1228 static int
1229 fm10k_link_update(struct rte_eth_dev *dev,
1230         __rte_unused int wait_to_complete)
1231 {
1232         struct fm10k_dev_info *dev_info =
1233                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1234         PMD_INIT_FUNC_TRACE();
1235
1236         /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
1237          * leave the speed undefined since there is no 50Gbps Ethernet.
1238          */
1239         dev->data->dev_link.link_speed  = 0;
1240         dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1241         dev->data->dev_link.link_status =
1242                 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1243
1244         return 0;
1245 }
1246
1247 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1248         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1249 {
1250         unsigned i, q;
1251         unsigned count = 0;
1252
1253         if (xstats_names != NULL) {
1254                 /* Note: limit checked in rte_eth_xstats_names() */
1255
1256                 /* Global stats */
1257                 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1258                         snprintf(xstats_names[count].name,
1259                                 sizeof(xstats_names[count].name),
1260                                 "%s", fm10k_hw_stats_strings[count].name);
1261                         count++;
1262                 }
1263
1264                 /* PF queue stats */
1265                 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1266                         for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1267                                 snprintf(xstats_names[count].name,
1268                                         sizeof(xstats_names[count].name),
1269                                         "rx_q%u_%s", q,
1270                                         fm10k_hw_stats_rx_q_strings[i].name);
1271                                 count++;
1272                         }
1273                         for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1274                                 snprintf(xstats_names[count].name,
1275                                         sizeof(xstats_names[count].name),
1276                                         "tx_q%u_%s", q,
1277                                         fm10k_hw_stats_tx_q_strings[i].name);
1278                                 count++;
1279                         }
1280                 }
1281         }
1282         return FM10K_NB_XSTATS;
1283 }
1284
1285 static int
1286 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1287                  unsigned n)
1288 {
1289         struct fm10k_hw_stats *hw_stats =
1290                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1291         unsigned i, q, count = 0;
1292
1293         if (n < FM10K_NB_XSTATS)
1294                 return FM10K_NB_XSTATS;
1295
1296         /* Global stats */
1297         for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1298                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1299                         fm10k_hw_stats_strings[count].offset);
1300                 xstats[count].id = count;
1301                 count++;
1302         }
1303
1304         /* PF queue stats */
1305         for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1306                 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1307                         xstats[count].value =
1308                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1309                                 fm10k_hw_stats_rx_q_strings[i].offset);
1310                         xstats[count].id = count;
1311                         count++;
1312                 }
1313                 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1314                         xstats[count].value =
1315                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1316                                 fm10k_hw_stats_tx_q_strings[i].offset);
1317                         xstats[count].id = count;
1318                         count++;
1319                 }
1320         }
1321
1322         return FM10K_NB_XSTATS;
1323 }
1324
1325 static int
1326 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1327 {
1328         uint64_t ipackets, opackets, ibytes, obytes;
1329         struct fm10k_hw *hw =
1330                 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1331         struct fm10k_hw_stats *hw_stats =
1332                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1333         int i;
1334
1335         PMD_INIT_FUNC_TRACE();
1336
1337         fm10k_update_hw_stats(hw, hw_stats);
1338
1339         ipackets = opackets = ibytes = obytes = 0;
1340         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1341                 (i < hw->mac.max_queues); ++i) {
1342                 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1343                 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1344                 stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1345                 stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1346                 ipackets += stats->q_ipackets[i];
1347                 opackets += stats->q_opackets[i];
1348                 ibytes   += stats->q_ibytes[i];
1349                 obytes   += stats->q_obytes[i];
1350         }
1351         stats->ipackets = ipackets;
1352         stats->opackets = opackets;
1353         stats->ibytes = ibytes;
1354         stats->obytes = obytes;
1355         return 0;
1356 }
1357
1358 static void
1359 fm10k_stats_reset(struct rte_eth_dev *dev)
1360 {
1361         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1362         struct fm10k_hw_stats *hw_stats =
1363                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1364
1365         PMD_INIT_FUNC_TRACE();
1366
1367         memset(hw_stats, 0, sizeof(*hw_stats));
1368         fm10k_rebind_hw_stats(hw, hw_stats);
1369 }
1370
1371 static void
1372 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1373         struct rte_eth_dev_info *dev_info)
1374 {
1375         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1376         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1377
1378         PMD_INIT_FUNC_TRACE();
1379
1380         dev_info->pci_dev            = pdev;
1381         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1382         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1383         dev_info->max_rx_queues      = hw->mac.max_queues;
1384         dev_info->max_tx_queues      = hw->mac.max_queues;
1385         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1386         dev_info->max_hash_mac_addrs = 0;
1387         dev_info->max_vfs            = pdev->max_vfs;
1388         dev_info->vmdq_pool_base     = 0;
1389         dev_info->vmdq_queue_base    = 0;
1390         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1391         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1392         dev_info->rx_offload_capa =
1393                 DEV_RX_OFFLOAD_VLAN_STRIP |
1394                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1395                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1396                 DEV_RX_OFFLOAD_TCP_CKSUM;
1397         dev_info->tx_offload_capa =
1398                 DEV_TX_OFFLOAD_VLAN_INSERT |
1399                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1400                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1401                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1402                 DEV_TX_OFFLOAD_TCP_TSO;
1403
1404         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1405         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1406
1407         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1408                 .rx_thresh = {
1409                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1410                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1411                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1412                 },
1413                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1414                 .rx_drop_en = 0,
1415         };
1416
1417         dev_info->default_txconf = (struct rte_eth_txconf) {
1418                 .tx_thresh = {
1419                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1420                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1421                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1422                 },
1423                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1424                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1425                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1426         };
1427
1428         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1429                 .nb_max = FM10K_MAX_RX_DESC,
1430                 .nb_min = FM10K_MIN_RX_DESC,
1431                 .nb_align = FM10K_MULT_RX_DESC,
1432         };
1433
1434         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1435                 .nb_max = FM10K_MAX_TX_DESC,
1436                 .nb_min = FM10K_MIN_TX_DESC,
1437                 .nb_align = FM10K_MULT_TX_DESC,
1438                 .nb_seg_max = FM10K_TX_MAX_SEG,
1439                 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1440         };
1441
1442         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1443                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1444                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1445 }
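
/*
 * Illustrative application-side sketch (not part of the driver; port_id
 * is assumed): clamp a requested ring size to the limits reported above
 * before calling the queue setup functions.
 *
 *   struct rte_eth_dev_info info;
 *   uint16_t nb_rxd = 1024;
 *
 *   rte_eth_dev_info_get(port_id, &info);
 *   if (nb_rxd > info.rx_desc_lim.nb_max)
 *           nb_rxd = info.rx_desc_lim.nb_max;
 *   nb_rxd -= nb_rxd % info.rx_desc_lim.nb_align;  // keep the multiple
 */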
1446
1447 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1448 static const uint32_t *
1449 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1450 {
1451         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1452             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1453                 static uint32_t ptypes[] = {
1454                         /* refers to rx_desc_to_ol_flags() */
1455                         RTE_PTYPE_L2_ETHER,
1456                         RTE_PTYPE_L3_IPV4,
1457                         RTE_PTYPE_L3_IPV4_EXT,
1458                         RTE_PTYPE_L3_IPV6,
1459                         RTE_PTYPE_L3_IPV6_EXT,
1460                         RTE_PTYPE_L4_TCP,
1461                         RTE_PTYPE_L4_UDP,
1462                         RTE_PTYPE_UNKNOWN
1463                 };
1464
1465                 return ptypes;
1466         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1467                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1468                 static uint32_t ptypes_vec[] = {
1469                         /* refers to fm10k_desc_to_pktype_v() */
1470                         RTE_PTYPE_L3_IPV4,
1471                         RTE_PTYPE_L3_IPV4_EXT,
1472                         RTE_PTYPE_L3_IPV6,
1473                         RTE_PTYPE_L3_IPV6_EXT,
1474                         RTE_PTYPE_L4_TCP,
1475                         RTE_PTYPE_L4_UDP,
1476                         RTE_PTYPE_TUNNEL_GENEVE,
1477                         RTE_PTYPE_TUNNEL_NVGRE,
1478                         RTE_PTYPE_TUNNEL_VXLAN,
1479                         RTE_PTYPE_TUNNEL_GRE,
1480                         RTE_PTYPE_UNKNOWN
1481                 };
1482
1483                 return ptypes_vec;
1484         }
1485
1486         return NULL;
1487 }
1488 #else
1489 static const uint32_t *
1490 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1491 {
1492         return NULL;
1493 }
1494 #endif
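
/*
 * Illustrative usage sketch (application side; port_id is assumed): the
 * generic ethdev call below lands in the handler above. A non-positive
 * return value means no ptype information is available.
 *
 *   uint32_t ptypes[16];
 *   int i, n;
 *
 *   n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                        ptypes, RTE_DIM(ptypes));
 *   for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *           printf("ptype 0x%08x\n", ptypes[i]);
 */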
1495
1496 static int
1497 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1498 {
1499         s32 result;
1500         uint16_t mac_num = 0;
1501         uint32_t vid_idx, vid_bit, mac_index;
1502         struct fm10k_hw *hw;
1503         struct fm10k_macvlan_filter_info *macvlan;
1504         struct rte_eth_dev_data *data = dev->data;
1505
1506         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1507         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1508
1509         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1510                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1511                 return -EINVAL;
1512         }
1513
1514         if (vlan_id > ETH_VLAN_ID_MAX) {
1515                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1516                 return -EINVAL;
1517         }
1518
1519         vid_idx = FM10K_VFTA_IDX(vlan_id);
1520         vid_bit = FM10K_VFTA_BIT(vlan_id);
1521         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1522         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1523                 return 0;
1524         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1525         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1526                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
1527                         "in the VLAN filter table");
1528                 return -EINVAL;
1529         }
1530
1531         fm10k_mbx_lock(hw);
1532         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1533         fm10k_mbx_unlock(hw);
1534         if (result != FM10K_SUCCESS) {
1535                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1536                 return -EIO;
1537         }
1538
1539         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1540                         (result == FM10K_SUCCESS); mac_index++) {
1541                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1542                         continue;
1543                 if (mac_num > macvlan->mac_num - 1) {
1544                         PMD_INIT_LOG(ERR, "MAC address number "
1545                                         "not match");
1546                         break;
1547                 }
1548                 fm10k_mbx_lock(hw);
1549                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1550                         data->mac_addrs[mac_index].addr_bytes,
1551                         vlan_id, on, 0);
1552                 fm10k_mbx_unlock(hw);
1553                 mac_num++;
1554         }
1555         if (result != FM10K_SUCCESS) {
1556                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1557                 return -EIO;
1558         }
1559
1560         if (on) {
1561                 macvlan->vlan_num++;
1562                 macvlan->vfta[vid_idx] |= vid_bit;
1563         } else {
1564                 macvlan->vlan_num--;
1565                 macvlan->vfta[vid_idx] &= ~vid_bit;
1566         }
1567         return 0;
1568 }
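
/*
 * Illustrative usage sketch (application side; port_id is assumed): the
 * generic ethdev call below reaches fm10k_vlan_filter_set() through
 * eth_dev_ops. Per the checks above, VMDQ mode must be off and the VLAN
 * ID must not exceed ETH_VLAN_ID_MAX.
 *
 *   int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);  // add VID 100
 *   if (ret == 0)
 *           ret = rte_eth_dev_vlan_filter(port_id, 100, 0);  // remove it
 */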
1569
1570 static int
1571 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1572 {
1573         if (mask & ETH_VLAN_STRIP_MASK) {
1574                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1575                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1576                                         "always on in fm10k");
1577         }
1578
1579         if (mask & ETH_VLAN_EXTEND_MASK) {
1580                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1581                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1582                                         "supported in fm10k");
1583         }
1584
1585         if (mask & ETH_VLAN_FILTER_MASK) {
1586                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1587                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1588         }
1589
1590         return 0;
1591 }
1592
1593 /* Add/Remove a MAC address, and update filters to main VSI */
1594 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1595                 const u8 *mac, bool add, uint32_t pool)
1596 {
1597         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598         struct fm10k_macvlan_filter_info *macvlan;
1599         uint32_t i, j, k;
1600
1601         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1602
1603         if (pool != MAIN_VSI_POOL_NUMBER) {
1604                 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1605                         "mac to pool %u", pool);
1606                 return;
1607         }
1608         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1609                 if (!macvlan->vfta[j])
1610                         continue;
1611                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1612                         if (!(macvlan->vfta[j] & (1 << k)))
1613                                 continue;
1614                         if (i + 1 > macvlan->vlan_num) {
1615                                 PMD_INIT_LOG(ERR, "vlan number not match");
1616                                 return;
1617                         }
1618                         fm10k_mbx_lock(hw);
1619                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1620                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1621                         fm10k_mbx_unlock(hw);
1622                         i++;
1623                 }
1624         }
1625 }
1626
1627 /* Add/Remove a MAC address, and update filters to VMDQ */
1628 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1629                 const u8 *mac, bool add, uint32_t pool)
1630 {
1631         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1632         struct fm10k_macvlan_filter_info *macvlan;
1633         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1634         uint32_t i;
1635
1636         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1637         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1638
1639         if (pool > macvlan->nb_queue_pools) {
1640                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1641                         " Max pool is %u",
1642                         pool, macvlan->nb_queue_pools);
1643                 return;
1644         }
1645         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1646                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1647                         continue;
1648                 fm10k_mbx_lock(hw);
1649                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1650                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1651                 fm10k_mbx_unlock(hw);
1652         }
1653 }
1654
1655 /* Add/Remove a MAC address, and update filters */
1656 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1657                 const u8 *mac, bool add, uint32_t pool)
1658 {
1659         struct fm10k_macvlan_filter_info *macvlan;
1660
1661         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1662
1663         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1664                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1665         else
1666                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1667
1668         if (add)
1669                 macvlan->mac_num++;
1670         else
1671                 macvlan->mac_num--;
1672 }
1673
1674 /* Add a MAC address, and update filters */
1675 static int
1676 fm10k_macaddr_add(struct rte_eth_dev *dev,
1677                 struct ether_addr *mac_addr,
1678                 uint32_t index,
1679                 uint32_t pool)
1680 {
1681         struct fm10k_macvlan_filter_info *macvlan;
1682
1683         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1684         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1685         macvlan->mac_vmdq_id[index] = pool;
1686         return 0;
1687 }
1688
1689 /* Remove a MAC address, and update filters */
1690 static void
1691 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1692 {
1693         struct rte_eth_dev_data *data = dev->data;
1694         struct fm10k_macvlan_filter_info *macvlan;
1695
1696         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1697         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1698                         FALSE, macvlan->mac_vmdq_id[index]);
1699         macvlan->mac_vmdq_id[index] = 0;
1700 }
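
/*
 * Illustrative usage sketch (application side; port_id and the address
 * are assumed): both ops above are reached through the generic MAC
 * management API; pool 0 is the main VSI when VMDQ is not enabled.
 *
 *   struct ether_addr mac = {
 *           .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *   rte_eth_dev_mac_addr_add(port_id, &mac, 0);   // add to pool 0
 *   rte_eth_dev_mac_addr_remove(port_id, &mac);   // remove again
 */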
1701
1702 static inline int
1703 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1704 {
1705         if ((request < min) || (request > max) || ((request % mult) != 0))
1706                 return -1;
1707         else
1708                 return 0;
1709 }
1710
1711
1712 static inline int
1713 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1714 {
1715         if ((request < min) || (request > max) || ((div % request) != 0))
1716                 return -1;
1717         else
1718                 return 0;
1719 }
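
/*
 * Worked example with illustrative values: for a ring where min = 32,
 * max = 512 and div = 512, a threshold request of 64 passes
 * (512 % 64 == 0) while 100 fails the divisor test (512 % 100 != 0).
 * Note the asymmetry: check_nb_desc() requires the request to be a
 * multiple of mult, while check_thresh() requires the request to evenly
 * divide div.
 */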
1720
1721 static inline int
1722 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1723 {
1724         uint16_t rx_free_thresh;
1725
1726         if (conf->rx_free_thresh == 0)
1727                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1728         else
1729                 rx_free_thresh = conf->rx_free_thresh;
1730
1731         /* make sure the requested threshold satisfies the constraints */
1732         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1733                         FM10K_RX_FREE_THRESH_MAX(q),
1734                         FM10K_RX_FREE_THRESH_DIV(q),
1735                         rx_free_thresh)) {
1736                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1737                         "less than or equal to %u, "
1738                         "greater than or equal to %u, "
1739                         "and a divisor of %u",
1740                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1741                         FM10K_RX_FREE_THRESH_MIN(q),
1742                         FM10K_RX_FREE_THRESH_DIV(q));
1743                 return -EINVAL;
1744         }
1745
1746         q->alloc_thresh = rx_free_thresh;
1747         q->drop_en = conf->rx_drop_en;
1748         q->rx_deferred_start = conf->rx_deferred_start;
1749
1750         return 0;
1751 }
1752
1753 /*
1754  * Hardware requires specific alignment for Rx packet buffers. At
1755  * least one of the following two conditions must be satisfied.
1756  *  1. Address is 512B aligned
1757  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1758  *
1759  * As such, the driver may need to adjust the DMA address within the
1760  * buffer by up to 512B.
1761  *
1762  * return 1 if the element size is valid, otherwise return 0.
1763  */
1764 static int
1765 mempool_element_size_valid(struct rte_mempool *mp)
1766 {
1767         uint32_t min_size;
1768
1769         /* elt_size includes mbuf header and headroom */
1770         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1771                         RTE_PKTMBUF_HEADROOM;
1772
1773         /* account for up to 512B of alignment */
1774         min_size -= FM10K_RX_DATABUF_ALIGN;
1775
1776         /* sanity check for overflow */
1777         if (min_size > mp->elt_size)
1778                 return 0;
1779
1780         /* size is valid */
1781         return 1;
1782 }
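
/*
 * Worked example with assumed sizes (a 2304B mempool element made up of
 * a 128B struct rte_mbuf, 128B RTE_PKTMBUF_HEADROOM and 2048B of data
 * room): min_size = 2304 - 128 - 128 - 512 = 1536B of buffer remains
 * after worst-case alignment. Were the element smaller than the combined
 * overhead, the unsigned subtraction would wrap above elt_size, which
 * the overflow check catches.
 */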
1783
1784 static int
1785 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1786         uint16_t nb_desc, unsigned int socket_id,
1787         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1788 {
1789         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1790         struct fm10k_dev_info *dev_info =
1791                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1792         struct fm10k_rx_queue *q;
1793         const struct rte_memzone *mz;
1794
1795         PMD_INIT_FUNC_TRACE();
1796
1797         /* make sure the mempool element size can account for alignment. */
1798         if (!mempool_element_size_valid(mp)) {
1799                 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1800                 return -EINVAL;
1801         }
1802
1803         /* make sure a valid number of descriptors has been requested */
1804         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1805                                 FM10K_MULT_RX_DESC, nb_desc)) {
1806                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1807                         "less than or equal to %"PRIu32", "
1808                         "greater than or equal to %u, "
1809                         "and a multiple of %u",
1810                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1811                         FM10K_MULT_RX_DESC);
1812                 return -EINVAL;
1813         }
1814
1815         /*
1816          * if this queue existed already, free the associated memory. The
1817          * queue cannot be reused in case we need to allocate memory on a
1818          * different socket than was previously used.
1819          */
1820         if (dev->data->rx_queues[queue_id] != NULL) {
1821                 rx_queue_free(dev->data->rx_queues[queue_id]);
1822                 dev->data->rx_queues[queue_id] = NULL;
1823         }
1824
1825         /* allocate memory for the queue structure */
1826         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1827                                 socket_id);
1828         if (q == NULL) {
1829                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1830                 return -ENOMEM;
1831         }
1832
1833         /* setup queue */
1834         q->mp = mp;
1835         q->nb_desc = nb_desc;
1836         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1837         q->port_id = dev->data->port_id;
1838         q->queue_id = queue_id;
1839         q->tail_ptr = (volatile uint32_t *)
1840                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1841         if (handle_rxconf(q, conf)) {
                     rte_free(q);
1842                 return -EINVAL;
             }
1843
1844         /* allocate memory for the software ring */
1845         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1846                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1847                         RTE_CACHE_LINE_SIZE, socket_id);
1848         if (q->sw_ring == NULL) {
1849                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1850                 rte_free(q);
1851                 return -ENOMEM;
1852         }
1853
1854         /*
1855          * allocate memory for the hardware descriptor ring. A memzone large
1856          * enough to hold the maximum ring size is requested to allow for
1857          * resizing in later calls to the queue setup function.
1858          */
1859         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1860                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1861                                       socket_id);
1862         if (mz == NULL) {
1863                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1864                 rte_free(q->sw_ring);
1865                 rte_free(q);
1866                 return -ENOMEM;
1867         }
1868         q->hw_ring = mz->addr;
1869         q->hw_ring_phys_addr = mz->iova;
1870
1871         /* Check if number of descs satisfies Vector requirement */
1872         if (!rte_is_power_of_2(nb_desc)) {
1873                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1874                                     "preconditions - canceling the feature for "
1875                                     "the whole port[%d]",
1876                              q->queue_id, q->port_id);
1877                 dev_info->rx_vec_allowed = false;
1878         } else
1879                 fm10k_rxq_vec_setup(q);
1880
1881         dev->data->rx_queues[queue_id] = q;
1882         return 0;
1883 }
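
/*
 * Illustrative application-side sketch (port_id is assumed): reaching
 * the setup function above through the generic ethdev API. Passing a
 * NULL rxconf selects the default_rxconf reported in dev_infos_get, and
 * a power-of-two descriptor count keeps the vector Rx path eligible.
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *                   256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   int ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                   NULL, mp);
 */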
1884
1885 static void
1886 fm10k_rx_queue_release(void *queue)
1887 {
1888         PMD_INIT_FUNC_TRACE();
1889
1890         rx_queue_free(queue);
1891 }
1892
1893 static inline int
1894 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1895 {
1896         uint16_t tx_free_thresh;
1897         uint16_t tx_rs_thresh;
1898
1899         /* constraint macros require that tx_free_thresh is configured
1900          * before tx_rs_thresh */
1901         if (conf->tx_free_thresh == 0)
1902                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1903         else
1904                 tx_free_thresh = conf->tx_free_thresh;
1905
1906         /* make sure the requested threshold satisfies the constraints */
1907         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1908                         FM10K_TX_FREE_THRESH_MAX(q),
1909                         FM10K_TX_FREE_THRESH_DIV(q),
1910                         tx_free_thresh)) {
1911                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1912                         "less than or equal to %u, "
1913                         "greater than or equal to %u, "
1914                         "and a divisor of %u",
1915                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1916                         FM10K_TX_FREE_THRESH_MIN(q),
1917                         FM10K_TX_FREE_THRESH_DIV(q));
1918                 return -EINVAL;
1919         }
1920
1921         q->free_thresh = tx_free_thresh;
1922
1923         if (conf->tx_rs_thresh == 0)
1924                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1925         else
1926                 tx_rs_thresh = conf->tx_rs_thresh;
1927
1928         q->tx_deferred_start = conf->tx_deferred_start;
1929
1930         /* make sure the requested threshold satisfies the constraints */
1931         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1932                         FM10K_TX_RS_THRESH_MAX(q),
1933                         FM10K_TX_RS_THRESH_DIV(q),
1934                         tx_rs_thresh)) {
1935                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1936                         "less than or equal to %u, "
1937                         "greater than or equal to %u, "
1938                         "and a divisor of %u",
1939                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1940                         FM10K_TX_RS_THRESH_MIN(q),
1941                         FM10K_TX_RS_THRESH_DIV(q));
1942                 return -EINVAL;
1943         }
1944
1945         q->rs_thresh = tx_rs_thresh;
1946
1947         return 0;
1948 }
1949
1950 static int
1951 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1952         uint16_t nb_desc, unsigned int socket_id,
1953         const struct rte_eth_txconf *conf)
1954 {
1955         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1956         struct fm10k_tx_queue *q;
1957         const struct rte_memzone *mz;
1958
1959         PMD_INIT_FUNC_TRACE();
1960
1961         /* make sure a valid number of descriptors has been requested */
1962         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1963                                 FM10K_MULT_TX_DESC, nb_desc)) {
1964                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1965                         "less than or equal to %"PRIu32", "
1966                         "greater than or equal to %u, "
1967                         "and a multiple of %u",
1968                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1969                         FM10K_MULT_TX_DESC);
1970                 return -EINVAL;
1971         }
1972
1973         /*
1974          * if this queue existed already, free the associated memory. The
1975          * queue cannot be reused in case we need to allocate memory on a
1976          * different socket than was previously used.
1977          */
1978         if (dev->data->tx_queues[queue_id] != NULL) {
1979                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1980
1981                 tx_queue_free(txq);
1982                 dev->data->tx_queues[queue_id] = NULL;
1983         }
1984
1985         /* allocate memory for the queue structure */
1986         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1987                                 socket_id);
1988         if (q == NULL) {
1989                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1990                 return -ENOMEM;
1991         }
1992
1993         /* setup queue */
1994         q->nb_desc = nb_desc;
1995         q->port_id = dev->data->port_id;
1996         q->queue_id = queue_id;
1997         q->txq_flags = conf->txq_flags;
1998         q->ops = &def_txq_ops;
1999         q->tail_ptr = (volatile uint32_t *)
2000                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2001         if (handle_txconf(q, conf)) {
                     rte_free(q);
2002                 return -EINVAL;
             }
2003
2004         /* allocate memory for the software ring */
2005         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2006                                         nb_desc * sizeof(struct rte_mbuf *),
2007                                         RTE_CACHE_LINE_SIZE, socket_id);
2008         if (q->sw_ring == NULL) {
2009                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2010                 rte_free(q);
2011                 return -ENOMEM;
2012         }
2013
2014         /*
2015          * allocate memory for the hardware descriptor ring. A memzone large
2016          * enough to hold the maximum ring size is requested to allow for
2017          * resizing in later calls to the queue setup function.
2018          */
2019         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2020                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2021                                       socket_id);
2022         if (mz == NULL) {
2023                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2024                 rte_free(q->sw_ring);
2025                 rte_free(q);
2026                 return -ENOMEM;
2027         }
2028         q->hw_ring = mz->addr;
2029         q->hw_ring_phys_addr = mz->iova;
2030
2031         /*
2032          * allocate memory for the RS bit tracker. Enough slots are required
2033          * to hold the descriptor index for each RS bit that needs to be set.
2034          */
2035         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2036                                 ((nb_desc + 1) / q->rs_thresh) *
2037                                 sizeof(uint16_t),
2038                                 RTE_CACHE_LINE_SIZE, socket_id);
2039         if (q->rs_tracker.list == NULL) {
2040                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2041                 rte_free(q->sw_ring);
2042                 rte_free(q);
2043                 return -ENOMEM;
2044         }
2045
2046         dev->data->tx_queues[queue_id] = q;
2047         return 0;
2048 }
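
/*
 * Illustrative application-side sketch (port_id is assumed): a NULL
 * txconf selects the default_txconf reported in dev_infos_get, including
 * FM10K_SIMPLE_TX_FLAG, which keeps the simple/vector Tx path eligible.
 *
 *   int ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                   NULL);
 */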
2049
2050 static void
2051 fm10k_tx_queue_release(void *queue)
2052 {
2053         struct fm10k_tx_queue *q = queue;
2054         PMD_INIT_FUNC_TRACE();
2055
2056         tx_queue_free(q);
2057 }
2058
2059 static int
2060 fm10k_reta_update(struct rte_eth_dev *dev,
2061                         struct rte_eth_rss_reta_entry64 *reta_conf,
2062                         uint16_t reta_size)
2063 {
2064         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065         uint16_t i, j, idx, shift;
2066         uint8_t mask;
2067         uint32_t reta;
2068
2069         PMD_INIT_FUNC_TRACE();
2070
2071         if (reta_size > FM10K_MAX_RSS_INDICES) {
2072                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2073                         "(%d) doesn't match the number hardware can supported "
2074                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2075                 return -EINVAL;
2076         }
2077
2078         /*
2079          * Update Redirection Table RETA[n], n=0..31. The redirection table has
2080          * 128 entries in 32 registers
2081          */
2082         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2083                 idx = i / RTE_RETA_GROUP_SIZE;
2084                 shift = i % RTE_RETA_GROUP_SIZE;
2085                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2086                                 BIT_MASK_PER_UINT32);
2087                 if (mask == 0)
2088                         continue;
2089
2090                 reta = 0;
2091                 if (mask != BIT_MASK_PER_UINT32)
2092                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2093
2094                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2095                         if (mask & (0x1 << j)) {
2096                                 if (mask != 0xF)
2097                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2098                                 reta |= reta_conf[idx].reta[shift + j] <<
2099                                                 (CHAR_BIT * j);
2100                         }
2101                 }
2102                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2103         }
2104
2105         return 0;
2106 }
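
/*
 * Illustrative sketch (application side; port_id and nb_queues are
 * assumed): spreading all FM10K_MAX_RSS_INDICES (128) RETA entries
 * round-robin across the Rx queues. With RTE_RETA_GROUP_SIZE == 64 the
 * 128 entries span two rte_eth_rss_reta_entry64 groups.
 *
 *   struct rte_eth_rss_reta_entry64 conf[2];
 *   int i;
 *
 *   memset(conf, 0, sizeof(conf));
 *   for (i = 0; i < 128; i++) {
 *           conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                   1ULL << (i % RTE_RETA_GROUP_SIZE);
 *           conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                   i % nb_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, conf, 128);
 */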
2107
2108 static int
2109 fm10k_reta_query(struct rte_eth_dev *dev,
2110                         struct rte_eth_rss_reta_entry64 *reta_conf,
2111                         uint16_t reta_size)
2112 {
2113         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2114         uint16_t i, j, idx, shift;
2115         uint8_t mask;
2116         uint32_t reta;
2117
2118         PMD_INIT_FUNC_TRACE();
2119
2120         if (reta_size < FM10K_MAX_RSS_INDICES) {
2121                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2122                         "(%d) doesn't match the number hardware can supported "
2123                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2124                 return -EINVAL;
2125         }
2126
2127         /*
2128          * Read Redirection Table RETA[n], n=0..31. The redirection table has
2129          * 128 entries in 32 registers
2130          */
2131         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2132                 idx = i / RTE_RETA_GROUP_SIZE;
2133                 shift = i % RTE_RETA_GROUP_SIZE;
2134                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2135                                 BIT_MASK_PER_UINT32);
2136                 if (mask == 0)
2137                         continue;
2138
2139                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2140                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2141                         if (mask & (0x1 << j))
2142                                 reta_conf[idx].reta[shift + j] = ((reta >>
2143                                         CHAR_BIT * j) & UINT8_MAX);
2144                 }
2145         }
2146
2147         return 0;
2148 }
2149
2150 static int
2151 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2152         struct rte_eth_rss_conf *rss_conf)
2153 {
2154         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2156         uint32_t mrqc;
2157         uint64_t hf = rss_conf->rss_hf;
2158         int i;
2159
2160         PMD_INIT_FUNC_TRACE();
2161
2162         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2163                                 FM10K_RSSRK_ENTRIES_PER_REG))
2164                 return -EINVAL;
2165
2166         if (hf == 0)
2167                 return -EINVAL;
2168
2169         mrqc = 0;
2170         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2171         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2172         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2173         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2174         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2175         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2176         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2177         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2178         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2179
2180         /* If the flags don't map to any supported hash type, return */
2181         if (mrqc == 0)
2182                 return -EINVAL;
2183
2184         if (key != NULL)
2185                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2186                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2187
2188         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2189
2190         return 0;
2191 }
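
/*
 * Illustrative sketch (application side; port_id is assumed, and the key
 * bytes are arbitrary placeholders): the key must be at least
 * FM10K_RSSRK_SIZE * 4 bytes, matching the hash_key_size reported in
 * dev_infos_get.
 *
 *   uint8_t key[FM10K_RSSRK_SIZE * sizeof(uint32_t)] = { 0x6d, 0x5a };
 *   struct rte_eth_rss_conf conf = {
 *           .rss_key = key,
 *           .rss_key_len = sizeof(key),
 *           .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *   };
 *   rte_eth_dev_rss_hash_update(port_id, &conf);
 */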
2192
2193 static int
2194 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2195         struct rte_eth_rss_conf *rss_conf)
2196 {
2197         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2198         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2199         uint32_t mrqc;
2200         uint64_t hf;
2201         int i;
2202
2203         PMD_INIT_FUNC_TRACE();
2204
2205         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2206                                 FM10K_RSSRK_ENTRIES_PER_REG))
2207                 return -EINVAL;
2208
2209         if (key != NULL)
2210                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2211                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2212
2213         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2214         hf = 0;
2215         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2216         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2217         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2218         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2219         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2220         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2221         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2222         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2223         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2224
2225         rss_conf->rss_hf = hf;
2226
2227         return 0;
2228 }
2229
2230 static void
2231 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2232 {
2233         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2234         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2235
2236         /* Bind all local non-queue interrupts to vector 0 */
2237         int_map |= FM10K_MISC_VEC_ID;
2238
2239         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2240         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2241         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2242         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2243         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2244         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2245
2246         /* Enable misc causes */
2247         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2248                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2249                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2250                                 FM10K_EIMR_ENABLE(MAILBOX) |
2251                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2252                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2253                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2254                                 FM10K_EIMR_ENABLE(VFLR));
2255
2256         /* Enable ITR 0 */
2257         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2258                                         FM10K_ITR_MASK_CLEAR);
2259         FM10K_WRITE_FLUSH(hw);
2260 }
2261
2262 static void
2263 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2264 {
2265         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2266         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2267
2268         int_map |= FM10K_MISC_VEC_ID;
2269
2270         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2271         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2272         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2273         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2274         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2275         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2276
2277         /* Disable misc causes */
2278         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2279                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2280                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2281                                 FM10K_EIMR_DISABLE(MAILBOX) |
2282                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2283                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2284                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2285                                 FM10K_EIMR_DISABLE(VFLR));
2286
2287         /* Disable ITR 0 */
2288         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2289         FM10K_WRITE_FLUSH(hw);
2290 }
2291
2292 static void
2293 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2294 {
2295         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2296         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2297
2298         /* Bind all local non-queue interrupts to vector 0 */
2299         int_map |= FM10K_MISC_VEC_ID;
2300
2301         /* Only INT 0 is available; the other 15 are reserved. */
2302         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2303
2304         /* Enable ITR 0 */
2305         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2306                                         FM10K_ITR_MASK_CLEAR);
2307         FM10K_WRITE_FLUSH(hw);
2308 }
2309
2310 static void
2311 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2312 {
2313         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2314         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2315
2316         int_map |= FM10K_MISC_VEC_ID;
2317
2318         /* Only INT 0 is available; the other 15 are reserved. */
2319         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2320
2321         /* Disable ITR 0 */
2322         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2323         FM10K_WRITE_FLUSH(hw);
2324 }
2325
2326 static int
2327 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2328 {
2329         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2330         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2331
2332         /* Enable ITR */
2333         if (hw->mac.type == fm10k_mac_pf)
2334                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2335                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2336         else
2337                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2338                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2339         rte_intr_enable(&pdev->intr_handle);
2340         return 0;
2341 }
2342
2343 static int
2344 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2345 {
2346         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2347         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2348
2349         /* Disable ITR */
2350         if (hw->mac.type == fm10k_mac_pf)
2351                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2352                         FM10K_ITR_MASK_SET);
2353         else
2354                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2355                         FM10K_ITR_MASK_SET);
2356         return 0;
2357 }
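
/*
 * Illustrative application-side sketch (port_id is assumed): with
 * intr_conf.rxq set at configure time and a multi-vector capable host
 * driver such as vfio-pci, per-queue interrupts can be armed while the
 * application sleeps and masked again while it polls.
 *
 *   rte_eth_dev_rx_intr_enable(port_id, 0);   // unmask queue 0 ITR
 *   // wait on the queue's event fd (e.g. via epoll), then:
 *   rte_eth_dev_rx_intr_disable(port_id, 0);  // mask it while polling
 */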
2358
2359 static int
2360 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2361 {
2362         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2363         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2364         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2365         uint32_t intr_vector, vec;
2366         uint16_t queue_id;
2367         int result = 0;
2368
2369         /* fm10k needs one separate interrupt for mailbox,
2370          * so only drivers which support multiple interrupt vectors,
2371          * e.g. vfio-pci, can work in fm10k interrupt mode
2372          */
2373         if (!rte_intr_cap_multiple(intr_handle) ||
2374                         dev->data->dev_conf.intr_conf.rxq == 0)
2375                 return result;
2376
2377         intr_vector = dev->data->nb_rx_queues;
2378
2379         /* disable interrupt first */
2380         rte_intr_disable(intr_handle);
2381         if (hw->mac.type == fm10k_mac_pf)
2382                 fm10k_dev_disable_intr_pf(dev);
2383         else
2384                 fm10k_dev_disable_intr_vf(dev);
2385
2386         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2387                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2388                 result = -EIO;
2389         }
2390
2391         if (rte_intr_dp_is_en(intr_handle) && !result) {
2392                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2393                         dev->data->nb_rx_queues * sizeof(int), 0);
2394                 if (intr_handle->intr_vec) {
2395                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2396                                         queue_id < dev->data->nb_rx_queues;
2397                                         queue_id++) {
2398                                 intr_handle->intr_vec[queue_id] = vec;
2399                                 if (vec < intr_handle->nb_efd - 1
2400                                                 + FM10K_RX_VEC_START)
2401                                         vec++;
2402                         }
2403                 } else {
2404                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2405                                 " intr_vec", dev->data->nb_rx_queues);
2406                         rte_intr_efd_disable(intr_handle);
2407                         result = -ENOMEM;
2408                 }
2409         }
2410
2411         if (hw->mac.type == fm10k_mac_pf)
2412                 fm10k_dev_enable_intr_pf(dev);
2413         else
2414                 fm10k_dev_enable_intr_vf(dev);
2415         rte_intr_enable(intr_handle);
2416         hw->mac.ops.update_int_moderator(hw);
2417         return result;
2418 }
2419
2420 static int
2421 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2422 {
2423         struct fm10k_fault fault;
2424         int err;
2425         const char *estr = "Unknown error";
2426
2427         /* Process PCA fault */
2428         if (eicr & FM10K_EICR_PCA_FAULT) {
2429                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2430                 if (err)
2431                         goto error;
2432                 switch (fault.type) {
2433                 case PCA_NO_FAULT:
2434                         estr = "PCA_NO_FAULT"; break;
2435                 case PCA_UNMAPPED_ADDR:
2436                         estr = "PCA_UNMAPPED_ADDR"; break;
2437                 case PCA_BAD_QACCESS_PF:
2438                         estr = "PCA_BAD_QACCESS_PF"; break;
2439                 case PCA_BAD_QACCESS_VF:
2440                         estr = "PCA_BAD_QACCESS_VF"; break;
2441                 case PCA_MALICIOUS_REQ:
2442                         estr = "PCA_MALICIOUS_REQ"; break;
2443                 case PCA_POISONED_TLP:
2444                         estr = "PCA_POISONED_TLP"; break;
2445                 case PCA_TLP_ABORT:
2446                         estr = "PCA_TLP_ABORT"; break;
2447                 default:
2448                         goto error;
2449                 }
2450                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2451                         estr, fault.func ? "VF" : "PF", fault.func,
2452                         fault.address, fault.specinfo);
2453         }
2454
2455         /* Process THI fault */
2456         if (eicr & FM10K_EICR_THI_FAULT) {
2457                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2458                 if (err)
2459                         goto error;
2460                 switch (fault.type) {
2461                 case THI_NO_FAULT:
2462                         estr = "THI_NO_FAULT"; break;
2463                 case THI_MAL_DIS_Q_FAULT:
2464                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2465                 default:
2466                         goto error;
2467                 }
2468                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2469                         estr, fault.func ? "VF" : "PF", fault.func,
2470                         fault.address, fault.specinfo);
2471         }
2472
2473         /* Process FUM fault */
2474         if (eicr & FM10K_EICR_FUM_FAULT) {
2475                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2476                 if (err)
2477                         goto error;
2478                 switch (fault.type) {
2479                 case FUM_NO_FAULT:
2480                         estr = "FUM_NO_FAULT"; break;
2481                 case FUM_UNMAPPED_ADDR:
2482                         estr = "FUM_UNMAPPED_ADDR"; break;
2483                 case FUM_POISONED_TLP:
2484                         estr = "FUM_POISONED_TLP"; break;
2485                 case FUM_BAD_VF_QACCESS:
2486                         estr = "FUM_BAD_VF_QACCESS"; break;
2487                 case FUM_ADD_DECODE_ERR:
2488                         estr = "FUM_ADD_DECODE_ERR"; break;
2489                 case FUM_RO_ERROR:
2490                         estr = "FUM_RO_ERROR"; break;
2491                 case FUM_QPRC_CRC_ERROR:
2492                         estr = "FUM_QPRC_CRC_ERROR"; break;
2493                 case FUM_CSR_TIMEOUT:
2494                         estr = "FUM_CSR_TIMEOUT"; break;
2495                 case FUM_INVALID_TYPE:
2496                         estr = "FUM_INVALID_TYPE"; break;
2497                 case FUM_INVALID_LENGTH:
2498                         estr = "FUM_INVALID_LENGTH"; break;
2499                 case FUM_INVALID_BE:
2500                         estr = "FUM_INVALID_BE"; break;
2501                 case FUM_INVALID_ALIGN:
2502                         estr = "FUM_INVALID_ALIGN"; break;
2503                 default:
2504                         goto error;
2505                 }
2506                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2507                         estr, fault.func ? "VF" : "PF", fault.func,
2508                         fault.address, fault.specinfo);
2509         }
2510
2511         return 0;
2512 error:
2513         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2514         return err;
2515 }
2516
2517 /**
2518  * PF interrupt handler triggered by NIC for handling specific interrupt.
2519  *
2520  * @param param
2521  *  The address of the parameter (struct rte_eth_dev *) registered before.
2524  *
2525  * @return
2526  *  void
2527  */
2528 static void
2529 fm10k_dev_interrupt_handler_pf(void *param)
2530 {
2531         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2532         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533         uint32_t cause, status;
2534         struct fm10k_dev_info *dev_info =
2535                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2536         int status_mbx;
2537         s32 err;
2538
2539         if (hw->mac.type != fm10k_mac_pf)
2540                 return;
2541
2542         cause = FM10K_READ_REG(hw, FM10K_EICR);
2543
2544         /* Handle PCI fault cases */
2545         if (cause & FM10K_EICR_FAULT_MASK) {
2546                 PMD_INIT_LOG(ERR, "INT: find fault!");
2547                 fm10k_dev_handle_fault(hw, cause);
2548         }
2549
2550         /* Handle switch up/down */
2551         if (cause & FM10K_EICR_SWITCHNOTREADY)
2552                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2553
2554         if (cause & FM10K_EICR_SWITCHREADY) {
2555                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2556                 if (dev_info->sm_down == 1) {
2557                         fm10k_mbx_lock(hw);
2558
2559                         /* For recreating logical ports */
2560                         status_mbx = hw->mac.ops.update_lport_state(hw,
2561                                         hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2562                         if (status_mbx == FM10K_SUCCESS)
2563                                 PMD_INIT_LOG(INFO,
2564                                         "INT: Recreated Logical port");
2565                         else
2566                                 PMD_INIT_LOG(INFO,
2567                                         "INT: Logical ports weren't recreated");
2568
2569                         status_mbx = hw->mac.ops.update_xcast_mode(hw,
2570                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2571                         if (status_mbx != FM10K_SUCCESS)
2572                                 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2573
2574                         fm10k_mbx_unlock(hw);
2575
2576                         /* first clear the internal SW recording structure */
2577                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2578                                                 ETH_MQ_RX_VMDQ_FLAG))
2579                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2580                                         false);
2581
2582                         fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2583                                         MAIN_VSI_POOL_NUMBER);
2584
2585                         /*
2586                          * Add the default MAC address and VLAN for the
2587                          * logical ports that have been created, leaving it
2588                          * to the application to fully recover Rx filtering.
2589                          */
2590                         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2591                                         MAIN_VSI_POOL_NUMBER);
2592
2593                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2594                                                 ETH_MQ_RX_VMDQ_FLAG))
2595                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2596                                         true);
2597
2598                         dev_info->sm_down = 0;
2599                         _rte_eth_dev_callback_process(dev,
2600                                         RTE_ETH_EVENT_INTR_LSC,
2601                                         NULL);
2602                 }
2603         }
2604
2605         /* Handle mailbox message */
2606         fm10k_mbx_lock(hw);
2607         err = hw->mbx.ops.process(hw, &hw->mbx);
2608         fm10k_mbx_unlock(hw);
2609
2610         if (err == FM10K_ERR_RESET_REQUESTED) {
2611                 PMD_INIT_LOG(INFO, "INT: Switch is down");
2612                 dev_info->sm_down = 1;
2613                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2614                                 NULL);
2615         }
2616
2617         /* Handle SRAM error */
2618         if (cause & FM10K_EICR_SRAMERROR) {
2619                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2620
2621                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2622                 /* Write to clear pending bits */
2623                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2624
2625                 /* TODO: print out error message after shared code updates */
2626         }
2627
2628         /* Clear these 3 events if any are pending */
2629         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2630                  FM10K_EICR_SWITCHREADY;
2631         if (cause)
2632                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2633
2634         /* Re-enable interrupt from device side */
2635         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2636                                         FM10K_ITR_MASK_CLEAR);
2637         /* Re-enable interrupt from host side */
2638         rte_intr_enable(dev->intr_handle);
2639 }
2640
2641 /**
2642  * VF interrupt handler triggered by NIC for handling specific interrupt.
2643  *
2644  * @param param
2645  *  The address of the parameter (struct rte_eth_dev *) registered before.
2648  *
2649  * @return
2650  *  void
2651  */
2652 static void
2653 fm10k_dev_interrupt_handler_vf(void *param)
2654 {
2655         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2656         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2657         struct fm10k_mbx_info *mbx = &hw->mbx;
2658         struct fm10k_dev_info *dev_info =
2659                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2660         const enum fm10k_mbx_state state = mbx->state;
2661         int status_mbx;
2662
2663         if (hw->mac.type != fm10k_mac_vf)
2664                 return;
2665
2666         /* Handle mailbox message if lock is acquired */
2667         fm10k_mbx_lock(hw);
2668         hw->mbx.ops.process(hw, &hw->mbx);
2669         fm10k_mbx_unlock(hw);
2670
2671         if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2672                 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2673
2674                 fm10k_mbx_lock(hw);
2675                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2676                                 MAX_LPORT_NUM, 1);
2677                 fm10k_mbx_unlock(hw);
2678
2679                 /* Setting reset flag */
2680                 dev_info->sm_down = 1;
2681                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2682                                 NULL);
2683         }
2684
2685         if (dev_info->sm_down == 1 &&
2686                         hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2687                 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2688                 fm10k_mbx_lock(hw);
2689                 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2690                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2691                 if (status_mbx != FM10K_SUCCESS)
2692                         PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2693                 fm10k_mbx_unlock(hw);
2694
2695                 /* first clear the internal SW recording structure */
2696                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2697                 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2698                                 MAIN_VSI_POOL_NUMBER);
2699
2700                 /*
2701                  * Add the default MAC address and VLAN for the logical ports that
2702                  * have been created, leaving it to the application to fully recover
2703                  * Rx filtering.
2704                  */
2705                 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2706                                 MAIN_VSI_POOL_NUMBER);
2707                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2708
2709                 dev_info->sm_down = 0;
2710                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2711                                 NULL);
2712         }
2713
2714         /* Re-enable interrupt from device side */
2715         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2716                                         FM10K_ITR_MASK_CLEAR);
2717         /* Re-enable interrupt from host side */
2718         rte_intr_enable(dev->intr_handle);
2719 }
2720
2721 /* Mailbox message handler in VF */
2722 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2723         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2724         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2725         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2726         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2727 };
2728
2729 static int
2730 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2731 {
2732         int err = 0;
2733
2734         /* Initialize mailbox lock */
2735         fm10k_mbx_initlock(hw);
2736
2737         /* Replace default message handler with new ones */
2738         if (hw->mac.type == fm10k_mac_vf)
2739                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2740
2741         if (err) {
2742                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2743                                 err);
2744                 return err;
2745         }
2746         /* Connect to SM for PF device or PF for VF device */
2747         return hw->mbx.ops.connect(hw, &hw->mbx);
2748 }
2749
2750 static void
2751 fm10k_close_mbx_service(struct fm10k_hw *hw)
2752 {
2753         /* Disconnect from SM for PF device or PF for VF device */
2754         hw->mbx.ops.disconnect(hw, &hw->mbx);
2755 }

static const struct eth_dev_ops fm10k_eth_dev_ops = {
        .dev_configure          = fm10k_dev_configure,
        .dev_start              = fm10k_dev_start,
        .dev_stop               = fm10k_dev_stop,
        .dev_close              = fm10k_dev_close,
        .promiscuous_enable     = fm10k_dev_promiscuous_enable,
        .promiscuous_disable    = fm10k_dev_promiscuous_disable,
        .allmulticast_enable    = fm10k_dev_allmulticast_enable,
        .allmulticast_disable   = fm10k_dev_allmulticast_disable,
        .stats_get              = fm10k_stats_get,
        .xstats_get             = fm10k_xstats_get,
        .xstats_get_names       = fm10k_xstats_get_names,
        .stats_reset            = fm10k_stats_reset,
        .xstats_reset           = fm10k_stats_reset,
        .link_update            = fm10k_link_update,
        .dev_infos_get          = fm10k_dev_infos_get,
        .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
        .vlan_filter_set        = fm10k_vlan_filter_set,
        .vlan_offload_set       = fm10k_vlan_offload_set,
        .mac_addr_add           = fm10k_macaddr_add,
        .mac_addr_remove        = fm10k_macaddr_remove,
        .rx_queue_start         = fm10k_dev_rx_queue_start,
        .rx_queue_stop          = fm10k_dev_rx_queue_stop,
        .tx_queue_start         = fm10k_dev_tx_queue_start,
        .tx_queue_stop          = fm10k_dev_tx_queue_stop,
        .rx_queue_setup         = fm10k_rx_queue_setup,
        .rx_queue_release       = fm10k_rx_queue_release,
        .tx_queue_setup         = fm10k_tx_queue_setup,
        .tx_queue_release       = fm10k_tx_queue_release,
        .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
        .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
        .reta_update            = fm10k_reta_update,
        .reta_query             = fm10k_reta_query,
        .rss_hash_update        = fm10k_rss_hash_update,
        .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
};
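/*
 * These callbacks back the public rte_ethdev API: for example,
 * rte_eth_dev_start() dispatches to fm10k_dev_start() and
 * rte_eth_rx_queue_setup() to fm10k_rx_queue_setup().
 */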

static int ftag_check_handler(__rte_unused const char *key,
                const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
fm10k_check_ftag(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *ftag_key = "enable_ftag";

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, ftag_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
        if (rte_kvargs_process(kvlist, ftag_key,
                                ftag_check_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}
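/*
 * Usage sketch: FTAG forwarding is requested through a device argument
 * appended to the PCI address, e.g. (the PCI address here is hypothetical)
 *
 *     testpmd -w 84:00.0,enable_ftag=1 -- -i
 *
 * Any value other than "1", or a malformed key-value list, leaves FTAG
 * disabled.
 */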

static uint16_t
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                    uint16_t nb_pkts)
{
        uint16_t nb_tx = 0;
        struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;

        while (nb_pkts) {
                uint16_t ret, num;

                num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
                ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
                                                 num);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_tx;
}
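/*
 * The loop above feeds the fixed-burst vector routine at most rs_thresh
 * packets at a time: with rs_thresh = 32, for instance, a 100-packet burst
 * is transmitted in chunks of 32, 32, 32 and 4. A short return from
 * fm10k_xmit_fixed_burst_vec() means the ring is out of free descriptors,
 * so the loop stops early rather than spinning.
 */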

static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
        struct fm10k_tx_queue *txq;
        int i;
        int use_sse = 1;
        uint16_t tx_ftag_en = 0;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                /* The primary process has already set the ftag flag
                 * and txq_flags.
                 */
                txq = dev->data->tx_queues[0];
                if (fm10k_tx_vec_condition_check(txq)) {
                        dev->tx_pkt_burst = fm10k_xmit_pkts;
                        dev->tx_pkt_prepare = fm10k_prep_pkts;
                        PMD_INIT_LOG(DEBUG, "Use regular Tx func");
                } else {
                        PMD_INIT_LOG(DEBUG, "Use vector Tx func");
                        dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
                        dev->tx_pkt_prepare = NULL;
                }
                return;
        }

        if (fm10k_check_ftag(dev->device->devargs))
                tx_ftag_en = 1;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                txq->tx_ftag_en = tx_ftag_en;
                /* Disable vector Tx if any queue fails the vector check */
                if (fm10k_tx_vec_condition_check(txq))
                        use_sse = 0;
        }

        if (use_sse) {
                PMD_INIT_LOG(DEBUG, "Use vector Tx func");
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        txq = dev->data->tx_queues[i];
                        fm10k_txq_vec_setup(txq);
                }
                dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
                dev->tx_pkt_prepare = NULL;
        } else {
                dev->tx_pkt_burst = fm10k_xmit_pkts;
                dev->tx_pkt_prepare = fm10k_prep_pkts;
                PMD_INIT_LOG(DEBUG, "Use regular Tx func");
        }
}
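/*
 * Note that dev->rx_pkt_burst/tx_pkt_burst live in per-process memory, not
 * in the shared rte_eth_dev_data, which is why secondary processes must run
 * the selection logic above and below themselves and must arrive at the
 * same choice as the primary.
 */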

static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
        struct fm10k_dev_info *dev_info =
                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
        uint16_t i, rx_using_sse;
        uint16_t rx_ftag_en = 0;

        if (fm10k_check_ftag(dev->device->devargs))
                rx_ftag_en = 1;

        /* Vector Rx is allowed only when a few configuration
         * conditions are met.
         */
        if (!fm10k_rx_vec_condition_check(dev) &&
                        dev_info->rx_vec_allowed && !rx_ftag_en) {
                if (dev->data->scattered_rx)
                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
                else
                        dev->rx_pkt_burst = fm10k_recv_pkts_vec;
        } else if (dev->data->scattered_rx)
                dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
        else
                dev->rx_pkt_burst = fm10k_recv_pkts;

        rx_using_sse =
                (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
                dev->rx_pkt_burst == fm10k_recv_pkts_vec);

        if (rx_using_sse)
                PMD_INIT_LOG(DEBUG, "Use vector Rx func");
        else
                PMD_INIT_LOG(DEBUG, "Use regular Rx func");

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

                rxq->rx_using_sse = rx_using_sse;
                rxq->rx_ftag_en = rx_ftag_en;
        }
}
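/*
 * The per-queue rx_using_sse/rx_ftag_en flags are written only by the
 * primary process (the queues live in shared memory), so the Rx fast path
 * can branch on them without re-deriving the configuration on every burst.
 */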

static void
fm10k_params_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_dev_info *info =
                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);

        /* Initialize bus info. Normally we would call fm10k_get_bus_info(),
         * but there is no way to get link status without reading BAR4.
         * Until this works, assume we have maximum bandwidth.
         * @todo - fix bus info
         */
        hw->bus_caps.speed = fm10k_bus_speed_8000;
        hw->bus_caps.width = fm10k_bus_width_pcie_x8;
        hw->bus_caps.payload = fm10k_bus_payload_512;
        hw->bus.speed = fm10k_bus_speed_8000;
        hw->bus.width = fm10k_bus_width_pcie_x8;
        hw->bus.payload = fm10k_bus_payload_256;

        info->rx_vec_allowed = true;
}

static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int diag, i;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &fm10k_eth_dev_ops;
        dev->rx_pkt_burst = &fm10k_recv_pkts;
        dev->tx_pkt_burst = &fm10k_xmit_pkts;
        dev->tx_pkt_prepare = &fm10k_prep_pkts;

        /*
         * The primary process does the whole initialization; secondary
         * processes just select the same Rx and Tx functions as the primary.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                fm10k_set_rx_function(dev);
                fm10k_set_tx_function(dev);
                return 0;
        }

        rte_eth_copy_pci_info(dev, pdev);

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        memset(macvlan, 0, sizeof(*macvlan));
        /* Vendor and Device ID need to be set before init of shared code */
        memset(hw, 0, sizeof(*hw));
        hw->device_id = pdev->id.device_id;
        hw->vendor_id = pdev->id.vendor_id;
        hw->subsystem_device_id = pdev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
        hw->revision_id = 0;
        hw->hw_addr = (void *)pdev->mem_resource[0].addr;
        if (hw->hw_addr == NULL) {
                PMD_INIT_LOG(ERR, "Bad mem resource."
                        " Try to blacklist unused devices.");
                return -EIO;
        }

        /* Store fm10k_adapter pointer */
        hw->back = dev->data->dev_private;

        /* Initialize the shared code */
        diag = fm10k_init_shared_code(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /* Initialize parameters */
        fm10k_params_init(dev);

        /* Initialize the hw */
        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        /* Initialize MAC address(es) */
        dev->data->mac_addrs = rte_zmalloc("fm10k",
                        ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
        if (dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
                return -ENOMEM;
        }

        diag = fm10k_read_mac_addr(hw);

        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                        &dev->data->mac_addrs[0]);

        if (diag != FM10K_SUCCESS ||
                !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
                /* Generate a random address */
                eth_random_addr(hw->mac.addr);
                memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
                ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                                &dev->data->mac_addrs[0]);
        }

        /* Reset the hw statistics */
        fm10k_stats_reset(dev);

        /* Reset the hw */
        diag = fm10k_reset_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
                return -EIO;
        }

        /* Setup mailbox service */
        diag = fm10k_setup_mbx_service(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
                return -EIO;
        }

        /* PF and VF have different interrupt handling mechanisms */
        if (hw->mac.type == fm10k_mac_pf) {
                /* register callback func to eal lib */
                rte_intr_callback_register(intr_handle,
                        fm10k_dev_interrupt_handler_pf, (void *)dev);

                /* enable MISC interrupt */
                fm10k_dev_enable_intr_pf(dev);
        } else { /* VF */
                rte_intr_callback_register(intr_handle,
                        fm10k_dev_interrupt_handler_vf, (void *)dev);

                fm10k_dev_enable_intr_vf(dev);
        }

        /* Enable intr after callback registered */
        rte_intr_enable(intr_handle);

        hw->mac.ops.update_int_moderator(hw);

        /* Make sure the Switch Manager is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                int switch_ready = 0;

                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        fm10k_mbx_lock(hw);
                        hw->mac.ops.get_host_state(hw, &switch_ready);
                        fm10k_mbx_unlock(hw);
                        if (switch_ready)
                                break;
                        /* Delay some time to acquire async LPORT_MAP info. */
                        rte_delay_us(WAIT_SWITCH_MSG_US);
                }

                if (switch_ready == 0) {
                        PMD_INIT_LOG(ERR, "switch is not ready");
                        return -1;
                }
        }
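        /*
         * Worst case, the polling loop above waits
         * MAX_QUERY_SWITCH_STATE_TIMES * WAIT_SWITCH_MSG_US =
         * 10 * 100 ms = 1 s for the Switch Manager before init fails.
         */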

        /*
         * The calls below trigger operations on the mailbox, so take the
         * lock to avoid racing with the interrupt handler: writes to the
         * mailbox FIFO raise an interrupt to the PF/SM, whose handler will
         * in turn raise an interrupt back to our side, where the mailbox
         * FIFO is touched again.
         */
        fm10k_mbx_lock(hw);
        /* Enable port first */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                                        MAX_LPORT_NUM, 1);

        /* Set unicast mode by default. The application can switch to
         * another mode through other API functions.
         */
        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                        FM10K_XCAST_MODE_NONE);

        fm10k_mbx_unlock(hw);

        /* Make sure the default VID is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        if (hw->mac.default_vid)
                                break;
                        /* Delay some time to acquire async port VLAN info. */
                        rte_delay_us(WAIT_SWITCH_MSG_US);
                }

                if (!hw->mac.default_vid) {
                        PMD_INIT_LOG(ERR, "default VID is not ready");
                        return -1;
                }
        }

        /* Add default MAC address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);

        return 0;
}

static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* safe to close dev here */
        fm10k_dev_close(dev);

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        /* disable uio/vfio intr */
        rte_intr_disable(intr_handle);

        /* PF and VF have different interrupt handling mechanisms */
        if (hw->mac.type == fm10k_mac_pf) {
                /* disable interrupt */
                fm10k_dev_disable_intr_pf(dev);

                /* unregister callback func from eal lib */
                rte_intr_callback_unregister(intr_handle,
                        fm10k_dev_interrupt_handler_pf, (void *)dev);
        } else {
                /* disable interrupt */
                fm10k_dev_disable_intr_vf(dev);

                rte_intr_callback_unregister(intr_handle,
                        fm10k_dev_interrupt_handler_vf, (void *)dev);
        }

        /* free mac memory */
        if (dev->data->mac_addrs) {
                rte_free(dev->data->mac_addrs);
                dev->data->mac_addrs = NULL;
        }

        memset(hw, 0, sizeof(*hw));

        return 0;
}

static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
}

static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
}
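/*
 * rte_eth_dev_pci_generic_probe() allocates the ethdev together with
 * sizeof(struct fm10k_adapter) bytes of private data and then invokes
 * eth_fm10k_dev_init() on it; the generic remove path runs
 * eth_fm10k_dev_uninit() before releasing the ethdev.
 */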

/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_pmd_fm10k = {
        .id_table = pci_id_fm10k_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_fm10k_pci_probe,
        .remove = eth_fm10k_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(fm10k_init_log);
static void
fm10k_init_log(void)
{
        fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
        if (fm10k_logtype_init >= 0)
                rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
        fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
        if (fm10k_logtype_driver >= 0)
                rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
}
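/*
 * Both logtypes default to NOTICE. They can be made more verbose at
 * runtime, e.g. through the EAL --log-level option (exact syntax depends
 * on the DPDK version) or by calling rte_log_set_level() from the
 * application.
 */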