deb_dpdk.git: drivers/net/enic/enic_main.c (New upstream version 16.11.9)
1 /*
2  * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  *
5  * Copyright (c) 2014, Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  *
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in
17  * the documentation and/or other materials provided with the
18  * distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include <stdio.h>
36
37 #include <sys/stat.h>
38 #include <sys/mman.h>
39 #include <fcntl.h>
40 #include <libgen.h>
41
42 #include <rte_pci.h>
43 #include <rte_memzone.h>
44 #include <rte_malloc.h>
45 #include <rte_mbuf.h>
46 #include <rte_string_fns.h>
47 #include <rte_ethdev.h>
48
49 #include "enic_compat.h"
50 #include "enic.h"
51 #include "wq_enet_desc.h"
52 #include "rq_enet_desc.h"
53 #include "cq_enet_desc.h"
54 #include "vnic_enet.h"
55 #include "vnic_dev.h"
56 #include "vnic_wq.h"
57 #include "vnic_rq.h"
58 #include "vnic_cq.h"
59 #include "vnic_intr.h"
60 #include "vnic_nic.h"
61
62 static inline int enic_is_sriov_vf(struct enic *enic)
63 {
64         return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
65 }
66
67 static int is_zero_addr(uint8_t *addr)
68 {
69         return !(addr[0] |  addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
70 }
71
72 static int is_mcast_addr(uint8_t *addr)
73 {
74         return addr[0] & 1;
75 }
76
77 static int is_eth_addr_valid(uint8_t *addr)
78 {
79         return !is_mcast_addr(addr) && !is_zero_addr(addr);
80 }
81
82 static void
83 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
84 {
85         uint16_t i;
86
87         if (!rq || !rq->mbuf_ring) {
88                 dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
89                 return;
90         }
91
92         for (i = 0; i < rq->ring.desc_count; i++) {
93                 if (rq->mbuf_ring[i]) {
94                         rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
95                         rq->mbuf_ring[i] = NULL;
96                 }
97         }
98 }
99
100 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
101 {
102         vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
103 }
104
105 static void enic_free_wq_buf(struct vnic_wq_buf *buf)
106 {
107         struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
108
109         rte_pktmbuf_free_seg(mbuf);
110         buf->mb = NULL;
111 }
112
113 static void enic_log_q_error(struct enic *enic)
114 {
115         unsigned int i;
116         u32 error_status;
117
118         for (i = 0; i < enic->wq_count; i++) {
119                 error_status = vnic_wq_error_status(&enic->wq[i]);
120                 if (error_status)
121                         dev_err(enic, "WQ[%d] error_status %d\n", i,
122                                 error_status);
123         }
124
125         for (i = 0; i < enic_vnic_rq_count(enic); i++) {
126                 if (!enic->rq[i].in_use)
127                         continue;
128                 error_status = vnic_rq_error_status(&enic->rq[i]);
129                 if (error_status)
130                         dev_err(enic, "RQ[%d] error_status %d\n", i,
131                                 error_status);
132         }
133 }
134
135 static void enic_clear_soft_stats(struct enic *enic)
136 {
137         struct enic_soft_stats *soft_stats = &enic->soft_stats;
138         rte_atomic64_clear(&soft_stats->rx_nombuf);
139         rte_atomic64_clear(&soft_stats->rx_packet_errors);
140         rte_atomic64_clear(&soft_stats->tx_oversized);
141 }
142
143 static void enic_init_soft_stats(struct enic *enic)
144 {
145         struct enic_soft_stats *soft_stats = &enic->soft_stats;
146         rte_atomic64_init(&soft_stats->rx_nombuf);
147         rte_atomic64_init(&soft_stats->rx_packet_errors);
148         rte_atomic64_init(&soft_stats->tx_oversized);
149         enic_clear_soft_stats(enic);
150 }
151
152 void enic_dev_stats_clear(struct enic *enic)
153 {
154         if (vnic_dev_stats_clear(enic->vdev))
155                 dev_err(enic, "Error in clearing stats\n");
156         enic_clear_soft_stats(enic);
157 }
158
159 void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
160 {
161         struct vnic_stats *stats;
162         struct enic_soft_stats *soft_stats = &enic->soft_stats;
163         int64_t rx_truncated;
164         uint64_t rx_packet_errors;
165
166         if (vnic_dev_stats_dump(enic->vdev, &stats)) {
167                 dev_err(enic, "Error in getting stats\n");
168                 return;
169         }
170
171         /* The number of truncated packets can only be calculated by
172          * subtracting a hardware counter from error packets received by
173          * the driver. Note: this causes transient inaccuracies in the
174          * ipackets count. Also, the lengths of truncated packets are
175          * counted in ibytes even though truncated packets are dropped,
176          * which can make ibytes slightly higher than it should be.
177          */
178         rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
179         rx_truncated = rx_packet_errors - stats->rx.rx_errors;
180
181         r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
182         r_stats->opackets = stats->tx.tx_frames_ok;
183
184         r_stats->ibytes = stats->rx.rx_bytes_ok;
185         r_stats->obytes = stats->tx.tx_bytes_ok;
186
187         r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
188         r_stats->oerrors = stats->tx.tx_errors
189                            + rte_atomic64_read(&soft_stats->tx_oversized);
190
191         r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
192
193         r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
194 }
195
196 void enic_del_mac_address(struct enic *enic)
197 {
198         if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
199                 dev_err(enic, "del mac addr failed\n");
200 }
201
202 void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
203 {
204         int err;
205
206         if (!is_eth_addr_valid(mac_addr)) {
207                 dev_err(enic, "invalid mac address\n");
208                 return;
209         }
210
211         err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
212         if (err) {
213                 dev_err(enic, "del mac addr failed\n");
214                 return;
215         }
216
217         ether_addr_copy((struct ether_addr *)mac_addr,
218                 (struct ether_addr *)enic->mac_addr);
219
220         err = vnic_dev_add_addr(enic->vdev, mac_addr);
221         if (err) {
222                 dev_err(enic, "add mac addr failed\n");
223                 return;
224         }
225 }
226
227 static void
228 enic_free_rq_buf(struct rte_mbuf **mbuf)
229 {
230         if (*mbuf == NULL)
231                 return;
232
233         rte_pktmbuf_free(*mbuf);
234         *mbuf = NULL;
235 }
236
237 void enic_init_vnic_resources(struct enic *enic)
238 {
239         unsigned int error_interrupt_enable = 1;
240         unsigned int error_interrupt_offset = 0;
241         unsigned int index = 0;
242         unsigned int cq_idx;
243         struct vnic_rq *data_rq;
244
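        /* Each user Rx queue is backed by a start-of-packet (SOP) RQ and,
         * when Rx scatter is used, a companion data RQ; both feed one CQ.
         */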
245         for (index = 0; index < enic->rq_count; index++) {
246                 cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
247
248                 vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
249                         cq_idx,
250                         error_interrupt_enable,
251                         error_interrupt_offset);
252
253                 data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
254                 if (data_rq->in_use)
255                         vnic_rq_init(data_rq,
256                                      cq_idx,
257                                      error_interrupt_enable,
258                                      error_interrupt_offset);
259
260                 vnic_cq_init(&enic->cq[cq_idx],
261                         0 /* flow_control_enable */,
262                         1 /* color_enable */,
263                         0 /* cq_head */,
264                         0 /* cq_tail */,
265                         1 /* cq_tail_color */,
266                         0 /* interrupt_enable */,
267                         1 /* cq_entry_enable */,
268                         0 /* cq_message_enable */,
269                         0 /* interrupt offset */,
270                         0 /* cq_message_addr */);
271         }
272
273         for (index = 0; index < enic->wq_count; index++) {
274                 vnic_wq_init(&enic->wq[index],
275                         enic_cq_wq(enic, index),
276                         error_interrupt_enable,
277                         error_interrupt_offset);
278
279                 cq_idx = enic_cq_wq(enic, index);
280                 vnic_cq_init(&enic->cq[cq_idx],
281                         0 /* flow_control_enable */,
282                         1 /* color_enable */,
283                         0 /* cq_head */,
284                         0 /* cq_tail */,
285                         1 /* cq_tail_color */,
286                         0 /* interrupt_enable */,
287                         0 /* cq_entry_enable */,
288                         1 /* cq_message_enable */,
289                         0 /* interrupt offset */,
290                         (u64)enic->wq[index].cqmsg_rz->phys_addr);
291         }
292
293         vnic_intr_init(&enic->intr,
294                 enic->config.intr_timer_usec,
295                 enic->config.intr_timer_type,
296                 /*mask_on_assertion*/1);
297 }
298
299
300 static int
301 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
302 {
303         struct rte_mbuf *mb;
304         struct rq_enet_desc *rqd = rq->ring.descs;
305         unsigned i;
306         dma_addr_t dma_addr;
307
308         if (!rq->in_use)
309                 return 0;
310
311         dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
312                   rq->ring.desc_count);
313
314         for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
315                 mb = rte_mbuf_raw_alloc(rq->mp);
316                 if (mb == NULL) {
317                         dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
318                         (unsigned)rq->index);
319                         return -ENOMEM;
320                 }
321
322                 mb->data_off = RTE_PKTMBUF_HEADROOM;
323                 dma_addr = (dma_addr_t)(mb->buf_physaddr
324                            + RTE_PKTMBUF_HEADROOM);
325                 rq_enet_desc_enc(rqd, dma_addr,
326                                 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
327                                 : RQ_ENET_TYPE_NOT_SOP),
328                                 mb->buf_len - RTE_PKTMBUF_HEADROOM);
329                 rq->mbuf_ring[i] = mb;
330         }
331
332         /* make sure all prior writes are complete before doing the PIO write */
333         rte_rmb();
334
335         /* Post all but the last buffer to VIC. */
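        /* (posted_index is written to the RQ control register further below.) */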
336         rq->posted_index = rq->ring.desc_count - 1;
337
338         rq->rx_nb_hold = 0;
339
340         dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
341                 enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
342         iowrite32(rq->posted_index, &rq->ctrl->posted_index);
343         iowrite32(0, &rq->ctrl->fetch_index);
344         rte_rmb();
345
346         return 0;
347
348 }
349
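/*
 * DMA-coherent allocation callback handed to the vNIC library through
 * vnic_register_cbacks() in enic_probe(). Memory comes from an rte_memzone
 * and is tracked on enic->memzone_list so enic_free_consistent() can find it.
 */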
350 static void *
351 enic_alloc_consistent(void *priv, size_t size,
352         dma_addr_t *dma_handle, u8 *name)
353 {
354         void *vaddr;
355         const struct rte_memzone *rz;
356         *dma_handle = 0;
357         struct enic *enic = (struct enic *)priv;
358         struct enic_memzone_entry *mze;
359
360         rz = rte_memzone_reserve_aligned((const char *)name,
361                                          size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
362         if (!rz) {
363                 pr_err("%s : Failed to allocate memory requested for %s\n",
364                         __func__, name);
365                 return NULL;
366         }
367
368         vaddr = rz->addr;
369         *dma_handle = (dma_addr_t)rz->phys_addr;
370
371         mze = rte_malloc("enic memzone entry",
372                          sizeof(struct enic_memzone_entry), 0);
373
374         if (!mze) {
375                 pr_err("%s : Failed to allocate memory for memzone list\n",
376                        __func__);
377                 rte_memzone_free(rz);
378                 return NULL;
379         }
380
381         mze->rz = rz;
382
383         rte_spinlock_lock(&enic->memzone_list_lock);
384         LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
385         rte_spinlock_unlock(&enic->memzone_list_lock);
386
387         return vaddr;
388 }
389
390 static void
391 enic_free_consistent(void *priv,
392                      __rte_unused size_t size,
393                      void *vaddr,
394                      dma_addr_t dma_handle)
395 {
396         struct enic_memzone_entry *mze;
397         struct enic *enic = (struct enic *)priv;
398
399         rte_spinlock_lock(&enic->memzone_list_lock);
400         LIST_FOREACH(mze, &enic->memzone_list, entries) {
401                 if (mze->rz->addr == vaddr &&
402                     mze->rz->phys_addr == dma_handle)
403                         break;
404         }
405         if (mze == NULL) {
406                 rte_spinlock_unlock(&enic->memzone_list_lock);
407                 dev_warning(enic,
408                             "Tried to free memory, but couldn't find it in the memzone list\n");
409                 return;
410         }
411         LIST_REMOVE(mze, entries);
412         rte_spinlock_unlock(&enic->memzone_list_lock);
413         rte_memzone_free(mze->rz);
414         rte_free(mze);
415 }
416
417 int enic_link_update(struct enic *enic)
418 {
419         struct rte_eth_dev *eth_dev = enic->rte_dev;
420         int ret;
421         int link_status = 0;
422
423         link_status = enic_get_link_status(enic);
424         ret = (link_status == enic->link_status);
425         enic->link_status = link_status;
426         eth_dev->data->dev_link.link_status = link_status;
427         eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
428         eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
429         return ret;
430 }
431
432 static void
433 enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
434         void *arg)
435 {
436         struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
437         struct enic *enic = pmd_priv(dev);
438
439         vnic_intr_return_all_credits(&enic->intr);
440
441         enic_link_update(enic);
442         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
443         enic_log_q_error(enic);
444 }
445
446 int enic_enable(struct enic *enic)
447 {
448         unsigned int index;
449         int err;
450         struct rte_eth_dev *eth_dev = enic->rte_dev;
451
452         eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
453         eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
454
455         /* vnic notification of link status has already been turned on in
456          * enic_dev_init() which is called during probe time.  Here we are
457          * just turning on interrupt vector 0 if needed.
458          */
459         if (eth_dev->data->dev_conf.intr_conf.lsc)
460                 vnic_dev_notify_set(enic->vdev, 0);
461
462         if (enic_clsf_init(enic))
463                 dev_warning(enic, "Init of hash table for clsf failed. "\
464                         "Flow director feature will not work\n");
465
466         for (index = 0; index < enic->rq_count; index++) {
467                 err = enic_alloc_rx_queue_mbufs(enic,
468                         &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
469                 if (err) {
470                         dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
471                         return err;
472                 }
473                 err = enic_alloc_rx_queue_mbufs(enic,
474                         &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
475                 if (err) {
476                         /* release the allocated mbufs for the sop rq*/
477                         enic_rxmbuf_queue_release(enic,
478                                 &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
479
480                         dev_err(enic, "Failed to alloc data RX queue mbufs\n");
481                         return err;
482                 }
483         }
484
485         for (index = 0; index < enic->wq_count; index++)
486                 enic_start_wq(enic, index);
487         for (index = 0; index < enic->rq_count; index++)
488                 enic_start_rq(enic, index);
489
490         vnic_dev_add_addr(enic->vdev, enic->mac_addr);
491
492         vnic_dev_enable_wait(enic->vdev);
493
494         /* Register and enable error interrupt */
495         rte_intr_callback_register(&(enic->pdev->intr_handle),
496                 enic_intr_handler, (void *)enic->rte_dev);
497
498         rte_intr_enable(&(enic->pdev->intr_handle));
499         vnic_intr_unmask(&enic->intr);
500
501         return 0;
502 }
503
504 int enic_alloc_intr_resources(struct enic *enic)
505 {
506         int err;
507
508         dev_info(enic, "vNIC resources used:  "\
509                 "wq %d rq %d cq %d intr %d\n",
510                 enic->wq_count, enic_vnic_rq_count(enic),
511                 enic->cq_count, enic->intr_count);
512
513         err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
514         if (err)
515                 enic_free_vnic_resources(enic);
516
517         return err;
518 }
519
520 void enic_free_rq(void *rxq)
521 {
522         struct vnic_rq *rq_sop, *rq_data;
523         struct enic *enic;
524
525         if (rxq == NULL)
526                 return;
527
528         rq_sop = (struct vnic_rq *)rxq;
529         enic = vnic_dev_priv(rq_sop->vdev);
530         rq_data = &enic->rq[rq_sop->data_queue_idx];
531
532         enic_rxmbuf_queue_release(enic, rq_sop);
533         if (rq_data->in_use)
534                 enic_rxmbuf_queue_release(enic, rq_data);
535
536         rte_free(rq_sop->mbuf_ring);
537         if (rq_data->in_use)
538                 rte_free(rq_data->mbuf_ring);
539
540         rq_sop->mbuf_ring = NULL;
541         rq_data->mbuf_ring = NULL;
542
543         vnic_rq_free(rq_sop);
544         if (rq_data->in_use)
545                 vnic_rq_free(rq_data);
546
547         vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
548
549         rq_sop->in_use = 0;
550         rq_data->in_use = 0;
551 }
552
553 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
554 {
555         struct rte_eth_dev *eth_dev = enic->rte_dev;
556         vnic_wq_enable(&enic->wq[queue_idx]);
557         eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
558 }
559
560 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
561 {
562         struct rte_eth_dev *eth_dev = enic->rte_dev;
563         int ret;
564
565         ret = vnic_wq_disable(&enic->wq[queue_idx]);
566         if (ret)
567                 return ret;
568
569         eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
570         return 0;
571 }
572
573 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
574 {
575         struct vnic_rq *rq_sop;
576         struct vnic_rq *rq_data;
577         rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
578         rq_data = &enic->rq[rq_sop->data_queue_idx];
579         struct rte_eth_dev *eth_dev = enic->rte_dev;
580
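        /* Enable the companion data RQ (if any) before the SOP RQ; the SOP RQ
         * is the one that actually starts packet delivery.
         */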
581         if (rq_data->in_use)
582                 vnic_rq_enable(rq_data);
583         rte_mb();
584         vnic_rq_enable(rq_sop);
585         eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
586 }
587
588 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
589 {
590         int ret1 = 0, ret2 = 0;
591         struct rte_eth_dev *eth_dev = enic->rte_dev;
592         struct vnic_rq *rq_sop;
593         struct vnic_rq *rq_data;
594         rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
595         rq_data = &enic->rq[rq_sop->data_queue_idx];
596
597         ret2 = vnic_rq_disable(rq_sop);
598         rte_mb();
599         if (rq_data->in_use)
600                 ret1 = vnic_rq_disable(rq_data);
601
602         if (ret2)
603                 return ret2;
604         else if (ret1)
605                 return ret1;
606
607         eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
608         return 0;
609 }
610
611 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
612         unsigned int socket_id, struct rte_mempool *mp,
613         uint16_t nb_desc, uint16_t free_thresh)
614 {
615         int rc;
616         uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
617         uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
618         struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
619         struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
620         unsigned int mbuf_size, mbufs_per_pkt;
621         unsigned int nb_sop_desc, nb_data_desc;
622         uint16_t min_sop, max_sop, min_data, max_data;
623         uint16_t mtu = enic->rte_dev->data->mtu;
624
625         rq_sop->is_sop = 1;
626         rq_sop->data_queue_idx = data_queue_idx;
627         rq_data->is_sop = 0;
628         rq_data->data_queue_idx = 0;
629         rq_sop->socket_id = socket_id;
630         rq_sop->mp = mp;
631         rq_data->socket_id = socket_id;
632         rq_data->mp = mp;
633         rq_sop->in_use = 1;
634         rq_sop->rx_free_thresh = free_thresh;
635         rq_data->rx_free_thresh = free_thresh;
636         dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
637                   free_thresh);
638
639         mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
640                                RTE_PKTMBUF_HEADROOM);
641
642         if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
643                 dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
644                 /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
645                 mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
646                                  (mbuf_size - 1)) / mbuf_size;
647         } else {
648                 dev_info(enic, "Scatter rx mode disabled\n");
649                 mbufs_per_pkt = 1;
650         }
651
652         if (mbufs_per_pkt > 1) {
653                 dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
654                 rq_sop->data_queue_enable = 1;
655                 rq_data->in_use = 1;
656         } else {
657                 dev_info(enic, "Rq %u Scatter rx mode not being used\n",
658                          queue_idx);
659                 rq_sop->data_queue_enable = 0;
660                 rq_data->in_use = 0;
661         }
662
663         /* number of descriptors has to be a multiple of 32 */
664         nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
665         nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
666
667         rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
668         rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
669
670         if (mbufs_per_pkt > 1) {
671                 min_sop = 64;
672                 max_sop = ((enic->config.rq_desc_count /
673                             (mbufs_per_pkt - 1)) & ~0x1F);
674                 min_data = min_sop * (mbufs_per_pkt - 1);
675                 max_data = enic->config.rq_desc_count;
676         } else {
677                 min_sop = 64;
678                 max_sop = enic->config.rq_desc_count;
679                 min_data = 0;
680                 max_data = 0;
681         }
682
683         if (nb_desc < (min_sop + min_data)) {
684                 dev_warning(enic,
685                             "Number of rx descs too low, adjusting to minimum\n");
686                 nb_sop_desc = min_sop;
687                 nb_data_desc = min_data;
688         } else if (nb_desc > (max_sop + max_data)) {
689                 dev_warning(enic,
690                             "Number of rx_descs too high, adjusting to maximum\n");
691                 nb_sop_desc = max_sop;
692                 nb_data_desc = max_data;
693         }
694         if (mbufs_per_pkt > 1) {
695                 dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
696                          mtu, mbuf_size, min_sop + min_data,
697                          max_sop + max_data);
698         }
699         dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
700                  nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
701
702         /* Allocate sop queue resources */
703         rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
704                 nb_sop_desc, sizeof(struct rq_enet_desc));
705         if (rc) {
706                 dev_err(enic, "error in allocation of sop rq\n");
707                 goto err_exit;
708         }
709         nb_sop_desc = rq_sop->ring.desc_count;
710
711         if (rq_data->in_use) {
712                 /* Allocate data queue resources */
713                 rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
714                                    nb_data_desc,
715                                    sizeof(struct rq_enet_desc));
716                 if (rc) {
717                         dev_err(enic, "error in allocation of data rq\n");
718                         goto err_free_rq_sop;
719                 }
720                 nb_data_desc = rq_data->ring.desc_count;
721         }
722         rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
723                            socket_id, nb_sop_desc + nb_data_desc,
724                            sizeof(struct cq_enet_rq_desc));
725         if (rc) {
726                 dev_err(enic, "error in allocation of cq for rq\n");
727                 goto err_free_rq_data;
728         }
729
730         /* Allocate the mbuf rings */
731         rq_sop->mbuf_ring = (struct rte_mbuf **)
732                 rte_zmalloc_socket("rq->mbuf_ring",
733                                    sizeof(struct rte_mbuf *) * nb_sop_desc,
734                                    RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
735         if (rq_sop->mbuf_ring == NULL)
736                 goto err_free_cq;
737
738         if (rq_data->in_use) {
739                 rq_data->mbuf_ring = (struct rte_mbuf **)
740                         rte_zmalloc_socket("rq->mbuf_ring",
741                                 sizeof(struct rte_mbuf *) * nb_data_desc,
742                                 RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
743                 if (rq_data->mbuf_ring == NULL)
744                         goto err_free_sop_mbuf;
745         }
746
747         rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
748
749         return 0;
750
751 err_free_sop_mbuf:
752         rte_free(rq_sop->mbuf_ring);
753 err_free_cq:
754         /* cleanup on error */
755         vnic_cq_free(&enic->cq[queue_idx]);
756 err_free_rq_data:
757         if (rq_data->in_use)
758                 vnic_rq_free(rq_data);
759 err_free_rq_sop:
760         vnic_rq_free(rq_sop);
761 err_exit:
762         return -ENOMEM;
763 }
764
765 void enic_free_wq(void *txq)
766 {
767         struct vnic_wq *wq;
768         struct enic *enic;
769
770         if (txq == NULL)
771                 return;
772
773         wq = (struct vnic_wq *)txq;
774         enic = vnic_dev_priv(wq->vdev);
775         rte_memzone_free(wq->cqmsg_rz);
776         vnic_wq_free(wq);
777         vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
778 }
779
780 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
781         unsigned int socket_id, uint16_t nb_desc)
782 {
783         int err;
784         struct vnic_wq *wq = &enic->wq[queue_idx];
785         unsigned int cq_index = enic_cq_wq(enic, queue_idx);
786         char name[NAME_MAX];
787         static int instance;
788
789         wq->socket_id = socket_id;
790         if (nb_desc > enic->config.wq_desc_count) {
791                 dev_warning(enic,
792                             "WQ %d - number of tx desc in cmd line (%d) "
793                             "is greater than that in the UCSM/CIMC adapter "
794                             "policy.  Applying the value in the adapter "
795                             "policy (%d)\n",
796                             queue_idx, nb_desc, enic->config.wq_desc_count);
797                 nb_desc = enic->config.wq_desc_count;
798         } else if (nb_desc != enic->config.wq_desc_count) {
799                 dev_info(enic,
800                          "TX Queues - effective number of descs:%d\n",
801                          nb_desc);
802         }
803
804         /* Allocate queue resources */
805         err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
806                 nb_desc,
807                 sizeof(struct wq_enet_desc));
808         if (err) {
809                 dev_err(enic, "error in allocation of wq\n");
810                 return err;
811         }
812
813         err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
814                 socket_id, nb_desc,
815                 sizeof(struct cq_enet_wq_desc));
816         if (err) {
817                 vnic_wq_free(wq);
818                 dev_err(enic, "error in allocation of cq for wq\n");
                return err;
819         }
820
821         /* setup up CQ message */
822         snprintf((char *)name, sizeof(name),
823                  "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
824                 instance++);
825
826         wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
827                                                    sizeof(uint32_t),
828                                                    SOCKET_ID_ANY, 0,
829                                                    ENIC_ALIGN);
830         if (!wq->cqmsg_rz)
831                 return -ENOMEM;
832
833         return err;
834 }
835
836 int enic_disable(struct enic *enic)
837 {
838         unsigned int i;
839         int err;
840
841         vnic_intr_mask(&enic->intr);
842         (void)vnic_intr_masked(&enic->intr); /* flush write */
843         rte_intr_disable(&enic->pdev->intr_handle);
844         rte_intr_callback_unregister(&enic->pdev->intr_handle,
845                                      enic_intr_handler,
846                                      (void *)enic->rte_dev);
847
848         vnic_dev_disable(enic->vdev);
849
850         enic_clsf_destroy(enic);
851
852         if (!enic_is_sriov_vf(enic))
853                 vnic_dev_del_addr(enic->vdev, enic->mac_addr);
854
855         for (i = 0; i < enic->wq_count; i++) {
856                 err = vnic_wq_disable(&enic->wq[i]);
857                 if (err)
858                         return err;
859         }
860         for (i = 0; i < enic_vnic_rq_count(enic); i++) {
861                 if (enic->rq[i].in_use) {
862                         err = vnic_rq_disable(&enic->rq[i]);
863                         if (err)
864                                 return err;
865                 }
866         }
867
868         /* If we were using interrupts, set the interrupt vector to -1
869          * to disable interrupts.  We are not disabling link notifications,
870          * though, as we want the polling of link status to continue working.
871          */
872         if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
873                 vnic_dev_notify_set(enic->vdev, -1);
874
875         vnic_dev_set_reset_flag(enic->vdev, 1);
876
877         for (i = 0; i < enic->wq_count; i++)
878                 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
879
880         for (i = 0; i < enic_vnic_rq_count(enic); i++)
881                 if (enic->rq[i].in_use)
882                         vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
883         for (i = 0; i < enic->cq_count; i++)
884                 vnic_cq_clean(&enic->cq[i]);
885         vnic_intr_clean(&enic->intr);
886
887         return 0;
888 }
889
890 static int enic_dev_wait(struct vnic_dev *vdev,
891         int (*start)(struct vnic_dev *, int),
892         int (*finished)(struct vnic_dev *, int *),
893         int arg)
894 {
895         int done;
896         int err;
897         int i;
898
899         err = start(vdev, arg);
900         if (err)
901                 return err;
902
903         /* Wait for func to complete...2 seconds max */
904         for (i = 0; i < 2000; i++) {
905                 err = finished(vdev, &done);
906                 if (err)
907                         return err;
908                 if (done)
909                         return 0;
910                 usleep(1000);
911         }
912         return -ETIMEDOUT;
913 }
914
915 static int enic_dev_open(struct enic *enic)
916 {
917         int err;
918
919         err = enic_dev_wait(enic->vdev, vnic_dev_open,
920                 vnic_dev_open_done, 0);
921         if (err)
922                 dev_err(enic_get_dev(enic),
923                         "vNIC device open failed, err %d\n", err);
924
925         return err;
926 }
927
928 static int enic_set_rsskey(struct enic *enic)
929 {
930         dma_addr_t rss_key_buf_pa;
931         union vnic_rss_key *rss_key_buf_va = NULL;
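        /* Default RSS hash key (four 10-byte blocks) copied to the vNIC below. */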
932         static union vnic_rss_key rss_key = {
933                 .key = {
934                         [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
935                         [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
936                         [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
937                         [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
938                 }
939         };
940         int err;
941         u8 name[NAME_MAX];
942
943         snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
944         rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
945                 &rss_key_buf_pa, name);
946         if (!rss_key_buf_va)
947                 return -ENOMEM;
948
949         rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
950
951         err = enic_set_rss_key(enic,
952                 rss_key_buf_pa,
953                 sizeof(union vnic_rss_key));
954
955         enic_free_consistent(enic, sizeof(union vnic_rss_key),
956                 rss_key_buf_va, rss_key_buf_pa);
957
958         return err;
959 }
960
961 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
962 {
963         dma_addr_t rss_cpu_buf_pa;
964         union vnic_rss_cpu *rss_cpu_buf_va = NULL;
965         int i;
966         int err;
967         u8 name[NAME_MAX];
968
969         snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
970         rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
971                 &rss_cpu_buf_pa, name);
972         if (!rss_cpu_buf_va)
973                 return -ENOMEM;
974
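        /* Spread the RSS indirection table entries round-robin across the
         * SOP RQs.
         */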
975         for (i = 0; i < (1 << rss_hash_bits); i++)
976                 (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
977                         enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
978
979         err = enic_set_rss_cpu(enic,
980                 rss_cpu_buf_pa,
981                 sizeof(union vnic_rss_cpu));
982
983         enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
984                 rss_cpu_buf_va, rss_cpu_buf_pa);
985
986         return err;
987 }
988
989 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
990         u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
991 {
992         const u8 tso_ipid_split_en = 0;
993         int err;
994
995         /* Enable VLAN tag stripping */
996
997         err = enic_set_nic_cfg(enic,
998                 rss_default_cpu, rss_hash_type,
999                 rss_hash_bits, rss_base_cpu,
1000                 rss_enable, tso_ipid_split_en,
1001                 enic->ig_vlan_strip_en);
1002
1003         return err;
1004 }
1005
1006 int enic_set_rss_nic_cfg(struct enic *enic)
1007 {
1008         const u8 rss_default_cpu = 0;
1009         const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1010             NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1011             NIC_CFG_RSS_HASH_TYPE_IPV6 |
1012             NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1013         const u8 rss_hash_bits = 7;
1014         const u8 rss_base_cpu = 0;
1015         u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1016
1017         if (rss_enable) {
1018                 if (!enic_set_rsskey(enic)) {
1019                         if (enic_set_rsscpu(enic, rss_hash_bits)) {
1020                                 rss_enable = 0;
1021                                 dev_warning(enic, "RSS disabled, "\
1022                                         "Failed to set RSS cpu indirection table.");
1023                         }
1024                 } else {
1025                         rss_enable = 0;
1026                         dev_warning(enic,
1027                                 "RSS disabled, Failed to set RSS key.\n");
1028                 }
1029         }
1030
1031         return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1032                 rss_hash_bits, rss_base_cpu, rss_enable);
1033 }
1034
1035 int enic_setup_finish(struct enic *enic)
1036 {
1037         int ret;
1038
1039         enic_init_soft_stats(enic);
1040
1041         ret = enic_set_rss_nic_cfg(enic);
1042         if (ret) {
1043                 dev_err(enic, "Failed to config nic, aborting.\n");
1044                 return -1;
1045         }
1046
1047         /* Default conf */
1048         vnic_dev_packet_filter(enic->vdev,
1049                 1 /* directed  */,
1050                 1 /* multicast */,
1051                 1 /* broadcast */,
1052                 0 /* promisc   */,
1053                 1 /* allmulti  */);
1054
1055         enic->promisc = 0;
1056         enic->allmulti = 1;
1057
1058         return 0;
1059 }
1060
1061 void enic_add_packet_filter(struct enic *enic)
1062 {
1063         /* Args -> directed, multicast, broadcast, promisc, allmulti */
1064         vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
1065                 enic->promisc, enic->allmulti);
1066 }
1067
1068 int enic_get_link_status(struct enic *enic)
1069 {
1070         return vnic_dev_link_status(enic->vdev);
1071 }
1072
1073 static void enic_dev_deinit(struct enic *enic)
1074 {
1075         struct rte_eth_dev *eth_dev = enic->rte_dev;
1076
1077         /* stop link status checking */
1078         vnic_dev_notify_unset(enic->vdev);
1079
1080         rte_free(eth_dev->data->mac_addrs);
1081         rte_free(enic->cq);
1082         rte_free(enic->rq);
1083         rte_free(enic->wq);
1084 }
1085
1086
1087 int enic_set_vnic_res(struct enic *enic)
1088 {
1089         struct rte_eth_dev *eth_dev = enic->rte_dev;
1090         int rc = 0;
1091         unsigned int required_rq, required_wq, required_cq;
1092
1093         /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
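        /* The second RQ of each pair is the data RQ used only for Rx scatter;
         * it is reserved here even when it will not be enabled (see
         * enic_alloc_rq()).
         */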
1094         required_rq = eth_dev->data->nb_rx_queues * 2;
1095         required_wq = eth_dev->data->nb_tx_queues;
1096         required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
1097
1098         if (enic->conf_rq_count < required_rq) {
1099                 dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
1100                         eth_dev->data->nb_rx_queues,
1101                         required_rq, enic->conf_rq_count);
1102                 rc = -EINVAL;
1103         }
1104         if (enic->conf_wq_count < required_wq) {
1105                 dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
1106                         eth_dev->data->nb_tx_queues, enic->conf_wq_count);
1107                 rc = -EINVAL;
1108         }
1109
1110         if (enic->conf_cq_count < required_cq) {
1111                 dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
1112                         required_cq, enic->conf_cq_count);
1113                 rc = -EINVAL;
1114         }
1115
1116         if (rc == 0) {
1117                 enic->rq_count = eth_dev->data->nb_rx_queues;
1118                 enic->wq_count = eth_dev->data->nb_tx_queues;
1119                 enic->cq_count = enic->rq_count + enic->wq_count;
1120         }
1121
1122         return rc;
1123 }
1124
1125 /* Initialize the completion queue for an RQ */
1126 static int
1127 enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
1128 {
1129         struct vnic_rq *sop_rq, *data_rq;
1130         unsigned int cq_idx;
1131         int rc = 0;
1132
1133         sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1134         data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
1135         cq_idx = rq_idx;
1136
1137         vnic_cq_clean(&enic->cq[cq_idx]);
1138         vnic_cq_init(&enic->cq[cq_idx],
1139                      0 /* flow_control_enable */,
1140                      1 /* color_enable */,
1141                      0 /* cq_head */,
1142                      0 /* cq_tail */,
1143                      1 /* cq_tail_color */,
1144                      0 /* interrupt_enable */,
1145                      1 /* cq_entry_enable */,
1146                      0 /* cq_message_enable */,
1147                      0 /* interrupt offset */,
1148                      0 /* cq_message_addr */);
1149
1150
1151         vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
1152                            enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
1153                            sop_rq->ring.desc_count - 1, 1, 0);
1154         if (data_rq->in_use) {
1155                 vnic_rq_init_start(data_rq,
1156                                    enic_cq_rq(enic,
1157                                    enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
1158                                    data_rq->ring.desc_count - 1, 1, 0);
1159         }
1160
1161         rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
1162         if (rc)
1163                 return rc;
1164
1165         if (data_rq->in_use) {
1166                 rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
1167                 if (rc) {
1168                         enic_rxmbuf_queue_release(enic, sop_rq);
1169                         return rc;
1170                 }
1171         }
1172
1173         return 0;
1174 }
1175
1176 /* The Cisco NIC can send and receive packets up to a max packet size
1177  * determined by the NIC type and firmware. There is also an MTU
1178  * configured into the NIC via the CIMC/UCSM management interface
1179  * which can be overridden by this function (up to the max packet size).
1180  * Depending on the network setup, doing so may cause packet drops
1181  * and unexpected behavior.
1182  */
1183 int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
1184 {
1185         unsigned int rq_idx;
1186         struct vnic_rq *rq;
1187         int rc = 0;
1188         uint16_t old_mtu;       /* previous setting */
1189         uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
1190         struct rte_eth_dev *eth_dev = enic->rte_dev;
1191
1192         old_mtu = eth_dev->data->mtu;
1193         config_mtu = enic->config.mtu;
1194
1195         if (new_mtu > enic->max_mtu) {
1196                 dev_err(enic,
1197                         "MTU not updated: requested (%u) greater than max (%u)\n",
1198                         new_mtu, enic->max_mtu);
1199                 return -EINVAL;
1200         }
1201         if (new_mtu < ENIC_MIN_MTU) {
1202                 dev_info(enic,
1203                         "MTU not updated: requested (%u) less than min (%u)\n",
1204                         new_mtu, ENIC_MIN_MTU);
1205                 return -EINVAL;
1206         }
1207         if (new_mtu > config_mtu)
1208                 dev_warning(enic,
1209                         "MTU (%u) is greater than value configured in NIC (%u)\n",
1210                         new_mtu, config_mtu);
1211
1212         /* The easy case is when scatter is disabled. However if the MTU
1213          * becomes greater than the mbuf data size, packet drops will ensue.
1214          */
1215         if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
1216                 eth_dev->data->mtu = new_mtu;
1217                 goto set_mtu_done;
1218         }
1219
1220         /* Rx scatter is enabled, so reconfigure the RQs on the fly. The point
1221          * is to change the Rx scatter mode if necessary for better performance,
1222          * i.e. if the MTU was greater than the mbuf size and now it's less,
1223          * scatter Rx doesn't have to be used and vice versa.
1224          */
1225         rte_spinlock_lock(&enic->mtu_lock);
1226
1227         /* Stop traffic on all RQs */
1228         for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
1229                 rq = &enic->rq[rq_idx];
1230                 if (rq->is_sop && rq->in_use) {
1231                         rc = enic_stop_rq(enic,
1232                                           enic_sop_rq_idx_to_rte_idx(rq_idx));
1233                         if (rc) {
1234                                 dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
1235                                 goto set_mtu_done;
1236                         }
1237                 }
1238         }
1239
1240         /* replace Rx function with a no-op to avoid getting stale pkts */
1241         eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
1242         rte_mb();
1243
1244         /* Allow time for threads to exit the real Rx function. */
1245         usleep(100000);
1246
1247         /* now it is safe to reconfigure the RQs */
1248
1249         /* update the mtu */
1250         eth_dev->data->mtu = new_mtu;
1251
1252         /* free and reallocate RQs with the new MTU */
1253         for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1254                 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1255                 if (!rq->in_use)
1256                         continue;
1257
1258                 enic_free_rq(rq);
1259                 rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
1260                                    rq->tot_nb_desc, rq->rx_free_thresh);
1261                 if (rc) {
1262                         dev_err(enic,
1263                                 "Fatal MTU alloc error- No traffic will pass\n");
1264                         goto set_mtu_done;
1265                 }
1266
1267                 rc = enic_reinit_rq(enic, rq_idx);
1268                 if (rc) {
1269                         dev_err(enic,
1270                                 "Fatal MTU RQ reinit- No traffic will pass\n");
1271                         goto set_mtu_done;
1272                 }
1273         }
1274
1275         /* put back the real receive function */
1276         rte_mb();
1277         eth_dev->rx_pkt_burst = enic_recv_pkts;
1278         rte_mb();
1279
1280         /* restart Rx traffic */
1281         for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1282                 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1283                 if (rq->is_sop && rq->in_use)
1284                         enic_start_rq(enic, rq_idx);
1285         }
1286
1287 set_mtu_done:
1288         dev_info(enic, "MTU changed from %u to %u\n",  old_mtu, new_mtu);
1289         rte_spinlock_unlock(&enic->mtu_lock);
1290         return rc;
1291 }
1292
1293 static int enic_dev_init(struct enic *enic)
1294 {
1295         int err;
1296         struct rte_eth_dev *eth_dev = enic->rte_dev;
1297
1298         vnic_dev_intr_coal_timer_info_default(enic->vdev);
1299
1300         /* Get vNIC configuration
1301          */
1302         err = enic_get_vnic_config(enic);
1303         if (err) {
1304                 dev_err(dev, "Get vNIC configuration failed, aborting\n");
1305                 return err;
1306         }
1307
1308         /* Get available resource counts */
1309         enic_get_res_counts(enic);
1310         if (enic->conf_rq_count == 1) {
1311                 dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
1312                 dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
1313                 dev_err(enic, "See the ENIC PMD guide for more information.\n");
1314                 return -EINVAL;
1315         }
1316         /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
1317         enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
1318                                enic->conf_cq_count, 8);
1319         enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
1320                                enic->conf_rq_count, 8);
1321         enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
1322                                enic->conf_wq_count, 8);
1323         if (enic->conf_cq_count > 0 && enic->cq == NULL) {
1324                 dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
1325                 return -1;
1326         }
1327         if (enic->conf_rq_count > 0 && enic->rq == NULL) {
1328                 dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
1329                 return -1;
1330         }
1331         if (enic->conf_wq_count > 0 && enic->wq == NULL) {
1332                 dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
1333                 return -1;
1334         }
1335
1336         /* Get the supported filters */
1337         enic_fdir_info(enic);
1338
1339         eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
1340         if (!eth_dev->data->mac_addrs) {
1341                 dev_err(enic, "mac addr storage alloc failed, aborting.\n");
1342                 return -1;
1343         }
1344         ether_addr_copy((struct ether_addr *) enic->mac_addr,
1345                 &eth_dev->data->mac_addrs[0]);
1346
1347         vnic_dev_set_reset_flag(enic->vdev, 0);
1348
1349         /* set up link status checking */
1350         vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
1351
1352         return 0;
1353
1354 }
1355
1356 int enic_probe(struct enic *enic)
1357 {
1358         struct rte_pci_device *pdev = enic->pdev;
1359         int err = -1;
1360
1361         dev_debug(enic, " Initializing ENIC PMD\n");
1362
1363         enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
1364         enic->bar0.len = pdev->mem_resource[0].len;
1365
1366         /* Register vNIC device */
1367         enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
1368         if (!enic->vdev) {
1369                 dev_err(enic, "vNIC registration failed, aborting\n");
1370                 goto err_out;
1371         }
1372
1373         LIST_INIT(&enic->memzone_list);
1374         rte_spinlock_init(&enic->memzone_list_lock);
1375
1376         vnic_register_cbacks(enic->vdev,
1377                 enic_alloc_consistent,
1378                 enic_free_consistent);
1379
1380         /*
1381          * Allocate the consistent memory for stats upfront so both primary and
1382          * secondary processes can dump stats.
1383          */
1384         err = vnic_dev_alloc_stats_mem(enic->vdev);
1385         if (err) {
1386                 dev_err(enic, "Failed to allocate cmd memory, aborting\n");
1387                 goto err_out_unregister;
1388         }
1389         /* Issue device open to get device in known state */
1390         err = enic_dev_open(enic);
1391         if (err) {
1392                 dev_err(enic, "vNIC dev open failed, aborting\n");
1393                 goto err_out_unregister;
1394         }
1395
1396         /* Set ingress vlan rewrite mode before vnic initialization */
1397         dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
1398                   enic->ig_vlan_rewrite_mode);
1399         err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1400                 enic->ig_vlan_rewrite_mode);
1401         if (err) {
1402                 dev_err(enic,
1403                         "Failed to set ingress vlan rewrite mode, aborting.\n");
1404                 goto err_out_dev_close;
1405         }
1406
1407         /* Issue device init to initialize the vnic-to-switch link.
1408          * We'll start with carrier off and wait for link UP
1409          * notification later to turn on carrier.  We don't need
1410          * to wait here for the vnic-to-switch link initialization
1411          * to complete; link UP notification is the indication that
1412          * the process is complete.
1413          */
1414
1415         err = vnic_dev_init(enic->vdev, 0);
1416         if (err) {
1417                 dev_err(enic, "vNIC dev init failed, aborting\n");
1418                 goto err_out_dev_close;
1419         }
1420
1421         err = enic_dev_init(enic);
1422         if (err) {
1423                 dev_err(enic, "Device initialization failed, aborting\n");
1424                 goto err_out_dev_close;
1425         }
1426
1427         return 0;
1428
1429 err_out_dev_close:
1430         vnic_dev_close(enic->vdev);
1431 err_out_unregister:
1432         vnic_dev_unregister(enic->vdev);
1433 err_out:
1434         return err;
1435 }
1436
1437 void enic_remove(struct enic *enic)
1438 {
1439         enic_dev_deinit(enic);
1440         vnic_dev_close(enic->vdev);
1441         vnic_dev_unregister(enic->vdev);
1442 }