/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>

#include <rte_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}
static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}
static int is_mcast_addr(uint8_t *addr)
{
	/* The I/G (multicast) bit is the least significant bit of octet 0. */
	return addr[0] & 1;
}
static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}
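
/*
 * Return all mbufs still held in an RQ's software ring. Called on queue
 * teardown and on the error paths in enic_enable().
 */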
static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}
void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
	vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
}
static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}
static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;

	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}
static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;

	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}
void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}
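
/*
 * Fill in rte_eth_stats from the vNIC hardware counters, folding in the
 * PMD's soft stats (rx_nombuf, rx_packet_errors, tx_oversized).
 */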
void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;

	if (vnic_dev_stats_dump(enic->vdev, &stats)) {
		dev_err(enic, "Error in getting stats\n");
		return;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}
void enic_del_mac_address(struct enic *enic)
{
	if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
		dev_err(enic, "del mac addr failed\n");
}
void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return;
	}

	err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
	if (err) {
		dev_err(enic, "del mac addr failed\n");
		return;
	}

	ether_addr_copy((struct ether_addr *)mac_addr,
		(struct ether_addr *)enic->mac_addr);

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err) {
		dev_err(enic, "add mac addr failed\n");
		return;
	}
}
static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}
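
/*
 * Program the vNIC's RQ, WQ, CQ and interrupt resources. Each DPDK Rx
 * queue is backed by a start-of-packet (SOP) RQ plus an optional data RQ,
 * both completing to the same CQ. Note the asymmetry below: Rx CQs use CQ
 * entries (cq_entry_enable), while Tx CQs report completions via a CQ
 * message written to the cqmsg_rz memzone (cq_message_enable).
 */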
void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				cq_idx,
				error_interrupt_enable,
				error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			0 /* interrupt offset */,
			0 /* cq_message_addr */);
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(u64)enic->wq[index].cqmsg_rz->phys_addr);
	}

	vnic_intr_init(&enic->intr,
		enic->config.intr_timer_usec,
		enic->config.intr_timer_type,
		/*mask_on_assertion*/1);
}
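
/*
 * Fill an RQ's software ring with mbufs and hand the descriptors to the
 * VIC. posted_index is deliberately left one short of desc_count;
 * presumably posted_index == fetch_index is reserved to mean "ring
 * empty", so one descriptor must always stay unposted.
 */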
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned int i;
	dma_addr_t dma_addr;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_physaddr
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				mb->buf_len - RTE_PKTMBUF_HEADROOM);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}
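
/*
 * DMA-consistent allocation callbacks handed to the common vNIC code via
 * vnic_register_cbacks() in enic_probe(). Allocations are backed by DPDK
 * memzones and tracked on enic->memzone_list so enic_free_consistent()
 * can find the memzone again from only the vaddr/dma_handle pair.
 */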
static void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	*dma_handle = 0;

	rz = rte_memzone_reserve_aligned((const char *)name,
					 size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
		       __func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->phys_addr;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);
	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}
static void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->phys_addr == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}
int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;
	int link_status = 0;

	link_status = enic_get_link_status(enic);
	ret = (link_status == enic->link_status);
	enic->link_status = link_status;
	eth_dev->data->dev_link.link_status = link_status;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	return ret;
}
static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
	void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}
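
/*
 * Bring the port up: seed every RQ with mbufs, start the WQs and RQs,
 * enable the vNIC, and hook up the error/link-state interrupt handler.
 */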
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "\
			"Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
int enic_alloc_intr_resources(struct enic *enic)
{
	int err;

	dev_info(enic, "vNIC resources used:  "\
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic_vnic_rq_count(enic),
		enic->cq_count, enic->intr_count);

	err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
	if (err)
		enic_free_vnic_resources(enic);

	return err;
}
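
/*
 * Release everything tied to an Rx queue: the mbufs still on the SOP and
 * data rings, the rings themselves, and the completion queue they share.
 */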
void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
}
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* Enable the data RQ (when in use) before the SOP RQ. */
	if (rq_data->in_use)
		vnic_rq_enable(rq_data);

	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];

	/* Disable the SOP RQ first, then the data RQ. */
	ret2 = vnic_rq_disable(rq_sop);
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
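
/*
 * Allocate the SOP/data RQ pair, the shared CQ, and the mbuf rings for one
 * DPDK Rx queue. The data RQ is only put to use when a received frame can
 * exceed a single mbuf (Rx scatter); descriptor counts are kept to
 * multiples of 32 as the hardware requires.
 */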
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint16_t mtu = enic->rte_dev->data->mtu;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);

	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
		mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
				 (mbuf_size - 1)) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* number of descriptors has to be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_info(enic,
			 "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_info(enic,
			 "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
			 mtu, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
		nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}
void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	u8 name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			"WQ %d - number of tx desc in cmd line (%d) "\
			"is greater than that in the UCSM/CIMC adapter "\
			"policy. Applying the value in the adapter "\
			"policy (%d)\n",
			queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			"TX Queues - effective number of descs:%d\n",
			nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		enic->config.wq_desc_count,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, enic->config.wq_desc_count,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		dev_err(enic, "error in allocation of cq for wq\n");
		vnic_wq_free(wq);
		return err;
	}

	/* set up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY, 0, ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}
int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	vnic_intr_mask(&enic->intr);
	(void)vnic_intr_masked(&enic->intr); /* flush write */
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	vnic_intr_clean(&enic->intr);

	return 0;
}
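
/*
 * Kick off a device command via start() and poll finished() once per
 * millisecond, giving the firmware up to two seconds to complete.
 */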
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	static union vnic_rss_key rss_key = {
		.key = {
			[0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
			[1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
			[2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
			[3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
		}
	};
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
		&rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));

	enic_free_consistent(enic, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int i;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
		&rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
			enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);

	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	/* Enable VLAN tag stripping */

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		enic->ig_vlan_strip_en);

	return err;
}
int enic_set_rss_nic_cfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
	    NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
	    NIC_CFG_RSS_HASH_TYPE_IPV6 |
	    NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warning(enic, "RSS disabled, "\
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warning(enic,
				"RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
int enic_setup_finish(struct enic *enic)
{
	int ret;

	enic_init_soft_stats(enic);

	ret = enic_set_rss_nic_cfg(enic);
	if (ret) {
		dev_err(enic, "Failed to config nic, aborting.\n");
		return ret;
	}

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}
void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}
int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}
static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
}
int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
	}

	return rc;
}
/* Initialize the completion queue for an RQ */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = rq_idx;

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}
/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
	 * change Rx scatter mode if necessary for better performance. I.e. if
	 * MTU was greater than the mbuf size and now it's less, scatter Rx
	 * doesn't have to be used and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}
static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}

	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *)enic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	return 0;
}
int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, " Initializing ENIC PMD\n");

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}
void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}