/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>

#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <libgen.h>

#include <rte_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
        return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
        return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

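/* An Ethernet address is multicast (or broadcast) if the least
 * significant bit of its first octet is set.
 */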
static int is_mcast_addr(uint8_t *addr)
{
        return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
        uint16_t i;

        if (!rq || !rq->mbuf_ring) {
                dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
                return;
        }

        for (i = 0; i < rq->ring.desc_count; i++) {
                if (rq->mbuf_ring[i]) {
                        rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
                        rq->mbuf_ring[i] = NULL;
                }
        }
}

void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
        vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
        struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

        rte_pktmbuf_free_seg(mbuf);
        buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        dev_err(enic, "WQ[%d] error_status %d\n", i,
                                error_status);
        }

        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (!enic->rq[i].in_use)
                        continue;
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        dev_err(enic, "RQ[%d] error_status %d\n", i,
                                error_status);
        }
}

static void enic_clear_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_clear(&soft_stats->rx_nombuf);
        rte_atomic64_clear(&soft_stats->rx_packet_errors);
        rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_init(&soft_stats->rx_nombuf);
        rte_atomic64_init(&soft_stats->rx_packet_errors);
        rte_atomic64_init(&soft_stats->tx_oversized);
        enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
        if (vnic_dev_stats_clear(enic->vdev))
                dev_err(enic, "Error in clearing stats\n");
        enic_clear_soft_stats(enic);
}

void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
        struct vnic_stats *stats;
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        int64_t rx_truncated;
        uint64_t rx_packet_errors;

        if (vnic_dev_stats_dump(enic->vdev, &stats)) {
                dev_err(enic, "Error in getting stats\n");
                return;
        }

        /* The number of truncated packets can only be calculated by
         * subtracting a hardware counter from error packets received by
         * the driver. Note: this causes transient inaccuracies in the
         * ipackets count. Also, the length of truncated packets is
         * counted in ibytes even though truncated packets are dropped,
         * which can make ibytes slightly higher than it should be.
         */
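        /* Illustrative arithmetic (hypothetical numbers): if the soft
         * rx_packet_errors counter reads 110 and the hardware rx_errors
         * counter reads 100, then rx_truncated = 110 - 100 = 10 packets,
         * which are subtracted from ipackets and added to imissed below.
         */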
        rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
        rx_truncated = rx_packet_errors - stats->rx.rx_errors;

        r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
        r_stats->opackets = stats->tx.tx_frames_ok;

        r_stats->ibytes = stats->rx.rx_bytes_ok;
        r_stats->obytes = stats->tx.tx_bytes_ok;

        r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
        r_stats->oerrors = stats->tx.tx_errors
                           + rte_atomic64_read(&soft_stats->tx_oversized);

        r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

        r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}

void enic_del_mac_address(struct enic *enic)
{
        if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
                dev_err(enic, "del mac addr failed\n");
}

void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
        int err;

        if (!is_eth_addr_valid(mac_addr)) {
                dev_err(enic, "invalid mac address\n");
                return;
        }

        err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
        if (err) {
                dev_err(enic, "del mac addr failed\n");
                return;
        }

        ether_addr_copy((struct ether_addr *)mac_addr,
                (struct ether_addr *)enic->mac_addr);

        err = vnic_dev_add_addr(enic->vdev, mac_addr);
        if (err) {
                dev_err(enic, "add mac addr failed\n");
                return;
        }
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
        if (*mbuf == NULL)
                return;

        rte_pktmbuf_free(*mbuf);
        *mbuf = NULL;
}

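/* Each application Rx queue is backed by a start-of-packet (SOP) RQ and,
 * when Rx scatter is in use, a second "data" RQ; the SOP/data pair shares
 * a single completion queue (see enic_cq_rq() and enic_alloc_rq()).
 */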
void enic_init_vnic_resources(struct enic *enic)
{
        unsigned int error_interrupt_enable = 1;
        unsigned int error_interrupt_offset = 0;
        unsigned int index = 0;
        unsigned int cq_idx;
        struct vnic_rq *data_rq;

        for (index = 0; index < enic->rq_count; index++) {
                cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

                vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
                        cq_idx,
                        error_interrupt_enable,
                        error_interrupt_offset);

                data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
                if (data_rq->in_use)
                        vnic_rq_init(data_rq,
                                     cq_idx,
                                     error_interrupt_enable,
                                     error_interrupt_offset);

                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        1 /* cq_entry_enable */,
                        0 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        0 /* cq_message_addr */);
        }

        for (index = 0; index < enic->wq_count; index++) {
                vnic_wq_init(&enic->wq[index],
                        enic_cq_wq(enic, index),
                        error_interrupt_enable,
                        error_interrupt_offset);

                cq_idx = enic_cq_wq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        0 /* cq_entry_enable */,
                        1 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        (u64)enic->wq[index].cqmsg_rz->phys_addr);
        }

        vnic_intr_init(&enic->intr,
                enic->config.intr_timer_usec,
                enic->config.intr_timer_type,
                /*mask_on_assertion*/1);
}

static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
        struct rte_mbuf *mb;
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned i;
        dma_addr_t dma_addr;

        if (!rq->in_use)
                return 0;

        dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
                  rq->ring.desc_count);

        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_mbuf_raw_alloc(rq->mp);
                if (mb == NULL) {
                        dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rq->index);
                        return -ENOMEM;
                }

                mb->data_off = RTE_PKTMBUF_HEADROOM;
                dma_addr = (dma_addr_t)(mb->buf_physaddr
                           + RTE_PKTMBUF_HEADROOM);
                rq_enet_desc_enc(rqd, dma_addr,
                                (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
                                : RQ_ENET_TYPE_NOT_SOP),
                                mb->buf_len - RTE_PKTMBUF_HEADROOM);
                rq->mbuf_ring[i] = mb;
        }

        /* make sure all prior writes are complete before doing the PIO write */
        rte_wmb();

        /* Post all but the last buffer to VIC. */
        rq->posted_index = rq->ring.desc_count - 1;

        rq->rx_nb_hold = 0;

        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        iowrite32(0, &rq->ctrl->fetch_index);
        rte_rmb();

        return 0;
}

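/* DMA-consistent allocation helpers registered with the vNIC layer via
 * vnic_register_cbacks() in enic_probe(). Memory comes from rte memzones,
 * and each allocation is tracked on enic->memzone_list so that
 * enic_free_consistent() can look the memzone up again by its virtual
 * and physical address.
 */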
static void *
enic_alloc_consistent(void *priv, size_t size,
        dma_addr_t *dma_handle, u8 *name)
{
        void *vaddr;
        const struct rte_memzone *rz;
        struct enic *enic = (struct enic *)priv;
        struct enic_memzone_entry *mze;

        *dma_handle = 0;

        rz = rte_memzone_reserve_aligned((const char *)name,
                                         size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
        if (!rz) {
                pr_err("%s : Failed to allocate memory requested for %s\n",
                        __func__, name);
                return NULL;
        }

        vaddr = rz->addr;
        *dma_handle = (dma_addr_t)rz->phys_addr;

        mze = rte_malloc("enic memzone entry",
                         sizeof(struct enic_memzone_entry), 0);

        if (!mze) {
                pr_err("%s : Failed to allocate memory for memzone list\n",
                       __func__);
                rte_memzone_free(rz);
                return NULL;
        }

        mze->rz = rz;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);

        return vaddr;
}

static void
enic_free_consistent(void *priv,
                     __rte_unused size_t size,
                     void *vaddr,
                     dma_addr_t dma_handle)
{
        struct enic_memzone_entry *mze;
        struct enic *enic = (struct enic *)priv;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_FOREACH(mze, &enic->memzone_list, entries) {
                if (mze->rz->addr == vaddr &&
                    mze->rz->phys_addr == dma_handle)
                        break;
        }
        if (mze == NULL) {
                rte_spinlock_unlock(&enic->memzone_list_lock);
                dev_warning(enic,
                            "Tried to free memory, but couldn't find it in the memzone list\n");
                return;
        }
        LIST_REMOVE(mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);
        rte_memzone_free(mze->rz);
        rte_free(mze);
}

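/* Poll the device for the current link state and cache it; returns
 * nonzero when the link status is unchanged from the previous poll.
 */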
int enic_link_update(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;
        int link_status = 0;

        link_status = enic_get_link_status(enic);
        ret = (link_status == enic->link_status);
        enic->link_status = link_status;
        eth_dev->data->dev_link.link_status = link_status;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        return ret;
}

static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
        void *arg)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
        struct enic *enic = pmd_priv(dev);

        vnic_intr_return_all_credits(&enic->intr);

        enic_link_update(enic);
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        enic_log_q_error(enic);
}

int enic_enable(struct enic *enic)
{
        unsigned int index;
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* vnic notification of link status has already been turned on in
         * enic_dev_init() which is called during probe time.  Here we are
         * just turning on interrupt vector 0 if needed.
         */
        if (eth_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, 0);

        if (enic_clsf_init(enic))
                dev_warning(enic, "Init of hash table for clsf failed. "
                        "Flow director feature will not work\n");

        for (index = 0; index < enic->rq_count; index++) {
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
                if (err) {
                        dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
                        return err;
                }
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
                if (err) {
                        /* release the allocated mbufs for the sop rq */
                        enic_rxmbuf_queue_release(enic,
                                &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

                        dev_err(enic, "Failed to alloc data RX queue mbufs\n");
                        return err;
                }
        }

        for (index = 0; index < enic->wq_count; index++)
                enic_start_wq(enic, index);
        for (index = 0; index < enic->rq_count; index++)
                enic_start_rq(enic, index);

        vnic_dev_add_addr(enic->vdev, enic->mac_addr);

        vnic_dev_enable_wait(enic->vdev);

        /* Register and enable error interrupt */
        rte_intr_callback_register(&(enic->pdev->intr_handle),
                enic_intr_handler, (void *)enic->rte_dev);

        rte_intr_enable(&(enic->pdev->intr_handle));
        vnic_intr_unmask(&enic->intr);

        return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
        int err;

        dev_info(enic, "vNIC resources used:  "
                "wq %d rq %d cq %d intr %d\n",
                enic->wq_count, enic_vnic_rq_count(enic),
                enic->cq_count, enic->intr_count);

        err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
        if (err)
                enic_free_vnic_resources(enic);

        return err;
}

void enic_free_rq(void *rxq)
{
        struct vnic_rq *rq_sop, *rq_data;
        struct enic *enic;

        if (rxq == NULL)
                return;

        rq_sop = (struct vnic_rq *)rxq;
        enic = vnic_dev_priv(rq_sop->vdev);
        rq_data = &enic->rq[rq_sop->data_queue_idx];

        enic_rxmbuf_queue_release(enic, rq_sop);
        if (rq_data->in_use)
                enic_rxmbuf_queue_release(enic, rq_data);

        rte_free(rq_sop->mbuf_ring);
        if (rq_data->in_use)
                rte_free(rq_data->mbuf_ring);

        rq_sop->mbuf_ring = NULL;
        rq_data->mbuf_ring = NULL;

        vnic_rq_free(rq_sop);
        if (rq_data->in_use)
                vnic_rq_free(rq_data);

        vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

        rq_sop->in_use = 0;
        rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        vnic_wq_enable(&enic->wq[queue_idx]);
        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;

        ret = vnic_wq_disable(&enic->wq[queue_idx]);
        if (ret)
                return ret;

        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;

        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
        rq_data = &enic->rq[rq_sop->data_queue_idx];

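        /* Enable the data RQ before the SOP RQ so the SOP queue never
         * feeds the NIC a disabled data queue; the full barrier orders
         * the two enable writes.
         */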
        if (rq_data->in_use)
                vnic_rq_enable(rq_data);
        rte_mb();
        vnic_rq_enable(rq_sop);
        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
        int ret1 = 0, ret2 = 0;
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;

        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
        rq_data = &enic->rq[rq_sop->data_queue_idx];

        ret2 = vnic_rq_disable(rq_sop);
        rte_mb();
        if (rq_data->in_use)
                ret1 = vnic_rq_disable(rq_data);

        if (ret2)
                return ret2;
        else if (ret1)
                return ret1;

        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
        uint16_t nb_desc, uint16_t free_thresh)
{
        int rc;
        uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
        uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
        struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
        struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;
        uint16_t mtu = enic->rte_dev->data->mtu;

        rq_sop->is_sop = 1;
        rq_sop->data_queue_idx = data_queue_idx;
        rq_data->is_sop = 0;
        rq_data->data_queue_idx = 0;
        rq_sop->socket_id = socket_id;
        rq_sop->mp = mp;
        rq_data->socket_id = socket_id;
        rq_data->mp = mp;
        rq_sop->in_use = 1;
        rq_sop->rx_free_thresh = free_thresh;
        rq_data->rx_free_thresh = free_thresh;
        dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
                  free_thresh);

        mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                               RTE_PKTMBUF_HEADROOM);

        if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
                dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
                /* ceil((mtu + ETHER_HDR_LEN + 4) / mbuf_size) */
                mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
                                 (mbuf_size - 1)) / mbuf_size;
        } else {
                dev_info(enic, "Scatter rx mode disabled\n");
                mbufs_per_pkt = 1;
        }

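        /* Worked example (hypothetical numbers): mtu = 9000 and
         * mbuf_size = 2048 give mbufs_per_pkt = ceil(9018 / 2048) = 5,
         * so each received frame can span up to five mbufs.
         */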
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
                rq_sop->data_queue_enable = 1;
                rq_data->in_use = 1;
        } else {
                dev_info(enic, "Rq %u Scatter rx mode not being used\n",
                         queue_idx);
                rq_sop->data_queue_enable = 0;
                rq_data->in_use = 0;
        }

        /* number of descriptors has to be a multiple of 32 */
        nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
        nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
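        /* Worked split (hypothetical numbers): nb_desc = 1024 with
         * mbufs_per_pkt = 3 gives nb_sop_desc = (1024 / 3) & ~0x1F = 320
         * and nb_data_desc = (1024 - 320) & ~0x1F = 704, both multiples
         * of 32 as required.
         */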

        rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
        rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

        if (mbufs_per_pkt > 1) {
                min_sop = 64;
                max_sop = ((enic->config.rq_desc_count /
                            (mbufs_per_pkt - 1)) & ~0x1F);
                min_data = min_sop * (mbufs_per_pkt - 1);
                max_data = enic->config.rq_desc_count;
        } else {
                min_sop = 64;
                max_sop = enic->config.rq_desc_count;
                min_data = 0;
                max_data = 0;
        }

        if (nb_desc < (min_sop + min_data)) {
                dev_warning(enic,
                            "Number of rx descs too low, adjusting to minimum\n");
                nb_sop_desc = min_sop;
                nb_data_desc = min_data;
        } else if (nb_desc > (max_sop + max_data)) {
                dev_warning(enic,
                            "Number of rx_descs too high, adjusting to maximum\n");
                nb_sop_desc = max_sop;
                nb_data_desc = max_data;
        }
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
                         mtu, mbuf_size, min_sop + min_data,
                         max_sop + max_data);
        }
        dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
                 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

        /* Allocate sop queue resources */
        rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
                nb_sop_desc, sizeof(struct rq_enet_desc));
        if (rc) {
                dev_err(enic, "error in allocation of sop rq\n");
                goto err_exit;
        }
        nb_sop_desc = rq_sop->ring.desc_count;

        if (rq_data->in_use) {
                /* Allocate data queue resources */
                rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
                                   nb_data_desc,
                                   sizeof(struct rq_enet_desc));
                if (rc) {
                        dev_err(enic, "error in allocation of data rq\n");
                        goto err_free_rq_sop;
                }
                nb_data_desc = rq_data->ring.desc_count;
        }
        rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
                           socket_id, nb_sop_desc + nb_data_desc,
                           sizeof(struct cq_enet_rq_desc));
        if (rc) {
                dev_err(enic, "error in allocation of cq for rq\n");
                goto err_free_rq_data;
        }

        /* Allocate the mbuf rings */
        rq_sop->mbuf_ring = (struct rte_mbuf **)
                rte_zmalloc_socket("rq->mbuf_ring",
                                   sizeof(struct rte_mbuf *) * nb_sop_desc,
                                   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
        if (rq_sop->mbuf_ring == NULL)
                goto err_free_cq;

        if (rq_data->in_use) {
                rq_data->mbuf_ring = (struct rte_mbuf **)
                        rte_zmalloc_socket("rq->mbuf_ring",
                                sizeof(struct rte_mbuf *) * nb_data_desc,
                                RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
                if (rq_data->mbuf_ring == NULL)
                        goto err_free_sop_mbuf;
        }

        rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

        return 0;

err_free_sop_mbuf:
        rte_free(rq_sop->mbuf_ring);
err_free_cq:
        /* cleanup on error */
        vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
        if (rq_data->in_use)
                vnic_rq_free(rq_data);
err_free_rq_sop:
        vnic_rq_free(rq_sop);
err_exit:
        return -ENOMEM;
}

void enic_free_wq(void *txq)
{
        struct vnic_wq *wq;
        struct enic *enic;

        if (txq == NULL)
                return;

        wq = (struct vnic_wq *)txq;
        enic = vnic_dev_priv(wq->vdev);
        rte_memzone_free(wq->cqmsg_rz);
        vnic_wq_free(wq);
        vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, uint16_t nb_desc)
{
        int err;
        struct vnic_wq *wq = &enic->wq[queue_idx];
        unsigned int cq_index = enic_cq_wq(enic, queue_idx);
        char name[NAME_MAX];
        static int instance;

        wq->socket_id = socket_id;
        if (nb_desc) {
                if (nb_desc > enic->config.wq_desc_count) {
                        dev_warning(enic,
                                "WQ %d - number of tx desc in cmd line (%d) "
                                "is greater than that in the UCSM/CIMC adapter "
                                "policy.  Applying the value in the adapter "
                                "policy (%d)\n",
                                queue_idx, nb_desc, enic->config.wq_desc_count);
                } else if (nb_desc != enic->config.wq_desc_count) {
                        enic->config.wq_desc_count = nb_desc;
                        dev_info(enic,
                                "TX Queues - effective number of descs:%d\n",
                                nb_desc);
                }
        }

        /* Allocate queue resources */
        err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
                enic->config.wq_desc_count,
                sizeof(struct wq_enet_desc));
        if (err) {
                dev_err(enic, "error in allocation of wq\n");
                return err;
        }

        err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
                socket_id, enic->config.wq_desc_count,
                sizeof(struct cq_enet_wq_desc));
        if (err) {
                vnic_wq_free(wq);
                dev_err(enic, "error in allocation of cq for wq\n");
                return err;
        }

        /* Set up the CQ message buffer. WQ completions are reported by
         * having the CQ DMA a message to this memzone rather than by
         * writing CQ entries (cq_message_enable in
         * enic_init_vnic_resources()).
         */
        snprintf((char *)name, sizeof(name),
                 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
                instance++);

        wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
                                                   sizeof(uint32_t),
                                                   SOCKET_ID_ANY, 0,
                                                   ENIC_ALIGN);
        if (!wq->cqmsg_rz)
                return -ENOMEM;

        return err;
}

int enic_disable(struct enic *enic)
{
        unsigned int i;
        int err;

        vnic_intr_mask(&enic->intr);
        (void)vnic_intr_masked(&enic->intr); /* flush write */
        rte_intr_disable(&enic->pdev->intr_handle);
        rte_intr_callback_unregister(&enic->pdev->intr_handle,
                                     enic_intr_handler,
                                     (void *)enic->rte_dev);

        vnic_dev_disable(enic->vdev);

        enic_clsf_destroy(enic);

        if (!enic_is_sriov_vf(enic))
                vnic_dev_del_addr(enic->vdev, enic->mac_addr);

        for (i = 0; i < enic->wq_count; i++) {
                err = vnic_wq_disable(&enic->wq[i]);
                if (err)
                        return err;
        }
        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (enic->rq[i].in_use) {
                        err = vnic_rq_disable(&enic->rq[i]);
                        if (err)
                                return err;
                }
        }

        /* If we were using interrupts, set the interrupt vector to -1
         * to disable interrupts.  We are not disabling link notifications,
         * though, as we want the polling of link status to continue working.
         */
        if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, -1);

        vnic_dev_set_reset_flag(enic->vdev, 1);

        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

        for (i = 0; i < enic_vnic_rq_count(enic); i++)
                if (enic->rq[i].in_use)
                        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        vnic_intr_clean(&enic->intr);

        return 0;
}

static int enic_dev_wait(struct vnic_dev *vdev,
        int (*start)(struct vnic_dev *, int),
        int (*finished)(struct vnic_dev *, int *),
        int arg)
{
        int done;
        int err;
        int i;

        err = start(vdev, arg);
        if (err)
                return err;

        /* Wait for func to complete...2 seconds max */
        for (i = 0; i < 2000; i++) {
                err = finished(vdev, &done);
                if (err)
                        return err;
                if (done)
                        return 0;
                usleep(1000);
        }
        return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
        int err;

        err = enic_dev_wait(enic->vdev, vnic_dev_open,
                vnic_dev_open_done, 0);
        if (err)
                dev_err(enic_get_dev(enic),
                        "vNIC device open failed, err %d\n", err);

        return err;
}

static int enic_set_rsskey(struct enic *enic)
{
        dma_addr_t rss_key_buf_pa;
        union vnic_rss_key *rss_key_buf_va = NULL;
        static union vnic_rss_key rss_key = {
                .key = {
                        [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
                        [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
                        [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
                        [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
                }
        };
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
        rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
                &rss_key_buf_pa, name);
        if (!rss_key_buf_va)
                return -ENOMEM;

        rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

        err = enic_set_rss_key(enic,
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));

        enic_free_consistent(enic, sizeof(union vnic_rss_key),
                rss_key_buf_va, rss_key_buf_pa);

        return err;
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
        dma_addr_t rss_cpu_buf_pa;
        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
        int i;
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
        rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
                &rss_cpu_buf_pa, name);
        if (!rss_cpu_buf_va)
                return -ENOMEM;

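        /* Spread the (1 << rss_hash_bits) indirection table entries
         * round-robin across the SOP RQ indexes; e.g. with rss_hash_bits = 7
         * and two Rx queues, the 128 entries alternate between the two
         * SOP RQs.
         */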
        for (i = 0; i < (1 << rss_hash_bits); i++)
                (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
                        enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);

        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));

        enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);

        return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
        u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
        const u8 tso_ipid_split_en = 0;
        int err;

        /* Enable VLAN tag stripping */

        err = enic_set_nic_cfg(enic,
                rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu,
                rss_enable, tso_ipid_split_en,
                enic->ig_vlan_strip_en);

        return err;
}

int enic_set_rss_nic_cfg(struct enic *enic)
{
        const u8 rss_default_cpu = 0;
        const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
            NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
            NIC_CFG_RSS_HASH_TYPE_IPV6 |
            NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
        const u8 rss_hash_bits = 7;
        const u8 rss_base_cpu = 0;
        u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

        if (rss_enable) {
                if (!enic_set_rsskey(enic)) {
                        if (enic_set_rsscpu(enic, rss_hash_bits)) {
                                rss_enable = 0;
                                dev_warning(enic, "RSS disabled, "
                                        "Failed to set RSS cpu indirection table.\n");
                        }
                } else {
                        rss_enable = 0;
                        dev_warning(enic,
                                "RSS disabled, Failed to set RSS key.\n");
                }
        }

        return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu, rss_enable);
}

int enic_setup_finish(struct enic *enic)
{
        int ret;

        enic_init_soft_stats(enic);

        ret = enic_set_rss_nic_cfg(enic);
        if (ret) {
                dev_err(enic, "Failed to config nic, aborting.\n");
                return -1;
        }

        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
                1 /* multicast */,
                1 /* broadcast */,
                0 /* promisc   */,
                1 /* allmulti  */);

        enic->promisc = 0;
        enic->allmulti = 1;

        return 0;
}

void enic_add_packet_filter(struct enic *enic)
{
        /* Args -> directed, multicast, broadcast, promisc, allmulti */
        vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
                enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
        return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        /* stop link status checking */
        vnic_dev_notify_unset(enic->vdev);

        rte_free(eth_dev->data->mac_addrs);
}

int enic_set_vnic_res(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int rc = 0;

        /* With Rx scatter support, two RQs are now used per RQ used by
         * the application.
         */
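        /* For example (hypothetical numbers): an application configuring
         * 4 Rx and 4 Tx queues needs 8 RQs on the VIC plus at least
         * 8 CQs (one per Rx queue and one per Tx queue).
         */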
        if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
                dev_err(enic, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                        eth_dev->data->nb_rx_queues,
                        eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
                rc = -EINVAL;
        }
        if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
                dev_err(enic, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
                        eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                rc = -EINVAL;
        }

        if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
                                   eth_dev->data->nb_tx_queues)) {
                dev_err(enic, "Not enough Completion queues. Required:%u, Configured:%u\n",
                        (eth_dev->data->nb_rx_queues +
                         eth_dev->data->nb_tx_queues), enic->conf_cq_count);
                rc = -EINVAL;
        }

        if (rc == 0) {
                enic->rq_count = eth_dev->data->nb_rx_queues;
                enic->wq_count = eth_dev->data->nb_tx_queues;
                enic->cq_count = enic->rq_count + enic->wq_count;
        }

        return rc;
}

/* Reinitialize the completion queue for an RQ pair, then reinitialize
 * and repopulate the SOP and data RQs themselves (used on MTU change).
 */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
        struct vnic_rq *sop_rq, *data_rq;
        unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
        int rc = 0;

        sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
        data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];

        vnic_cq_clean(&enic->cq[cq_idx]);
        vnic_cq_init(&enic->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0 /* interrupt_enable */,
                     1 /* cq_entry_enable */,
                     0 /* cq_message_enable */,
                     0 /* interrupt offset */,
                     0 /* cq_message_addr */);

        vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
                           enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
                           sop_rq->ring.desc_count - 1, 1, 0);
        if (data_rq->in_use) {
                vnic_rq_init_start(data_rq,
                                   enic_cq_rq(enic,
                                   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
                                   data_rq->ring.desc_count - 1, 1, 0);
        }

        rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
        if (rc)
                return rc;

        if (data_rq->in_use) {
                rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
                if (rc) {
                        enic_rxmbuf_queue_release(enic, sop_rq);
                        return rc;
                }
        }

        return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
        unsigned int rq_idx;
        struct vnic_rq *rq;
        int rc = 0;
        uint16_t old_mtu;       /* previous setting */
        uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        old_mtu = eth_dev->data->mtu;
        config_mtu = enic->config.mtu;

        if (new_mtu > enic->max_mtu) {
                dev_err(enic,
                        "MTU not updated: requested (%u) greater than max (%u)\n",
                        new_mtu, enic->max_mtu);
                return -EINVAL;
        }
        if (new_mtu < ENIC_MIN_MTU) {
                dev_info(enic,
                        "MTU not updated: requested (%u) less than min (%u)\n",
                        new_mtu, ENIC_MIN_MTU);
                return -EINVAL;
        }
        if (new_mtu > config_mtu)
                dev_warning(enic,
                        "MTU (%u) is greater than value configured in NIC (%u)\n",
                        new_mtu, config_mtu);

        /* The easy case is when scatter is disabled. However if the MTU
         * becomes greater than the mbuf data size, packet drops will ensue.
         */
        if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
                eth_dev->data->mtu = new_mtu;
                goto set_mtu_done;
        }

        /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
         * change Rx scatter mode if necessary for better performance. I.e. if
         * MTU was greater than the mbuf size and now it's less, scatter Rx
         * doesn't have to be used and vice versa.
         */
        rte_spinlock_lock(&enic->mtu_lock);

        /* Stop traffic on all RQs */
        for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
                rq = &enic->rq[rq_idx];
                if (rq->is_sop && rq->in_use) {
                        rc = enic_stop_rq(enic,
                                          enic_sop_rq_idx_to_rte_idx(rq_idx));
                        if (rc) {
                                dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
                                goto set_mtu_done;
                        }
                }
        }

        /* replace the Rx function with a no-op to avoid getting stale pkts */
        eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
        rte_mb();

        /* Allow time for threads to exit the real Rx function. */
        usleep(100000);

        /* now it is safe to reconfigure the RQs */

        /* update the mtu */
        eth_dev->data->mtu = new_mtu;

        /* free and reallocate RQs with the new MTU */
        for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
                rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

                enic_free_rq(rq);
                rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
                                   rq->tot_nb_desc, rq->rx_free_thresh);
                if (rc) {
                        dev_err(enic,
                                "Fatal MTU alloc error- No traffic will pass\n");
                        goto set_mtu_done;
                }

                rc = enic_reinit_rq(enic, rq_idx);
                if (rc) {
                        dev_err(enic,
                                "Fatal MTU RQ reinit- No traffic will pass\n");
                        goto set_mtu_done;
                }
        }

        /* put back the real receive function */
        rte_mb();
        eth_dev->rx_pkt_burst = enic_recv_pkts;
        rte_mb();

        /* restart Rx traffic */
        for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
                rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
                if (rq->is_sop && rq->in_use)
                        enic_start_rq(enic, rq_idx);
        }

set_mtu_done:
        dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
        rte_spinlock_unlock(&enic->mtu_lock);
        return rc;
}

static int enic_dev_init(struct enic *enic)
{
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        vnic_dev_intr_coal_timer_info_default(enic->vdev);

        /* Get vNIC configuration */
        err = enic_get_vnic_config(enic);
        if (err) {
                dev_err(enic, "Get vNIC configuration failed, aborting\n");
                return err;
        }

        /* Get available resource counts */
        enic_get_res_counts(enic);
        if (enic->conf_rq_count == 1) {
                dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
                dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
                dev_err(enic, "See the ENIC PMD guide for more information.\n");
                return -EINVAL;
        }

        /* Get the supported filters */
        enic_fdir_info(enic);

        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
        ether_addr_copy((struct ether_addr *)enic->mac_addr,
                &eth_dev->data->mac_addrs[0]);

        vnic_dev_set_reset_flag(enic->vdev, 0);

        /* set up link status checking */
        vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

        return 0;
}

int enic_probe(struct enic *enic)
{
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;

        dev_debug(enic, " Initializing ENIC PMD\n");

        enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
        enic->bar0.len = pdev->mem_resource[0].len;

        /* Register vNIC device */
        enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
        if (!enic->vdev) {
                dev_err(enic, "vNIC registration failed, aborting\n");
                goto err_out;
        }

        LIST_INIT(&enic->memzone_list);
        rte_spinlock_init(&enic->memzone_list_lock);

        vnic_register_cbacks(enic->vdev,
                enic_alloc_consistent,
                enic_free_consistent);

        /* Issue device open to get device in known state */
        err = enic_dev_open(enic);
        if (err) {
                dev_err(enic, "vNIC dev open failed, aborting\n");
                goto err_out_unregister;
        }

        /* Set ingress vlan rewrite mode before vnic initialization */
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
                IG_VLAN_REWRITE_MODE_PASS_THRU);
        if (err) {
                dev_err(enic,
                        "Failed to set ingress vlan rewrite mode, aborting.\n");
                goto err_out_dev_close;
        }

        /* Issue device init to initialize the vnic-to-switch link.
         * We'll start with carrier off and wait for link UP
         * notification later to turn on carrier.  We don't need
         * to wait here for the vnic-to-switch link initialization
         * to complete; link UP notification is the indication that
         * the process is complete.
         */
        err = vnic_dev_init(enic->vdev, 0);
        if (err) {
                dev_err(enic, "vNIC dev init failed, aborting\n");
                goto err_out_dev_close;
        }

        err = enic_dev_init(enic);
        if (err) {
                dev_err(enic, "Device initialization failed, aborting\n");
                goto err_out_dev_close;
        }

        return 0;

err_out_dev_close:
        vnic_dev_close(enic->vdev);
err_out_unregister:
        vnic_dev_unregister(enic->vdev);
err_out:
        return err;
}

void enic_remove(struct enic *enic)
{
        enic_dev_deinit(enic);
        vnic_dev_close(enic->vdev);
        vnic_dev_unregister(enic->vdev);
}