/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1

static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct i40e_virtchnl_queue_select *qsel,
			   bool on);

/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * VF should use scatter range queues, so it need not
	 * set QBASE in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Set to enable VFLAN_QTABLE[] registers valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		       I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI; each register packs two queue indexes */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}

/**
 * Perform the VF reset operation.
 **/
static int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be
	 * generated and this function will be called again. To avoid
	 * that, disable the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}

	/* This is not the first reset, so do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							   vf_id) + (i - 1));
			I40E_WRITE_REG(hw, val,
				       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR 0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF.
	 * Use absolute VF id, refer to datasheet for details.
	 */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		       (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -ETIMEDOUT;
	}

	/* Reset done, set COMPLETE flag and clear reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	/* Reset done, notify the VF that it is active again */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);

	return ret;
}
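
/**
 * Send a response message to the VF through the admin queue.
 **/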
static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode,
			    uint32_t retval, uint8_t *msg, uint16_t msglen)
{
	int ret;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
				     msg, msglen, NULL);
	if (ret)
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);

	return ret;
}

static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
{
	struct i40e_virtchnl_version_info info;

	info.major = I40E_DPDK_VERSION_MAJOR;
	info.minor = I40E_DPDK_VERSION_MINOR;
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				    I40E_SUCCESS,
				    (uint8_t *)&info, sizeof(info));
}

static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
	i40e_pf_host_vf_reset(vf, 1);

	/* No feedback will be sent to VF for VFLR */
	return I40E_SUCCESS;
}
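
/**
 * Reply to OP_GET_VF_RESOURCES with the VF's VSI, queue, vector
 * and default MAC address information.
 **/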
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	/* only have 1 VSI by default */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct i40e_virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				   I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change below setting if PF host can support more VSIs for VF */
	vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	/* The VF is assumed to have a single VSI now, so always return 0 */
	vf_res->vsi_res[0].vsi_id = 0;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
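
/**
 * Program one VF RX queue context into the HMC.
 **/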
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;
	rx_ctx.l2tsel = 1;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}
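
/**
 * Program one VF TX queue context into the HMC and bind the queue
 * to the VF function via QTX_CTL.
 **/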
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.new_context = 1;
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue with the VF function. TX/RX queues appear in
	 * pairs, so only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
		  ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		   I40E_QTX_CTL_PF_INDX_MASK) |
		  (((vf->vf_idx + hw->func_caps.vf_base_id) <<
		    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
		   I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
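
/**
 * Validate and apply a VF's VSI queue configuration message.
 **/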
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
	    vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
			&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
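
/**
 * Validate and apply a VF's extended VSI queue configuration
 * message, which carries per-queue CRC strip settings.
 **/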
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
					       uint8_t *msg,
					       uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
		(struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
	int i, ret = I40E_SUCCESS;

	if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
	    vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
					vc_vqcei->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpei = vc_vqcei->qpair;
	for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
		if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
			vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
				    ret, NULL, 0);

	return ret;
}
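
/**
 * Bind all VF queues to the single interrupt vector supplied in
 * the IRQ map message.
 **/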
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_irq_map_info *irqmap =
		(struct i40e_virtchnl_irq_map_info *)msg;

	if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* Assume the VF has only one vector to bind all queues */
	if (irqmap->num_vectors != 1) {
		PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* This MSIX interrupt index is within the VF's own range */
	vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
	vf->vsi->nb_msix = irqmap->num_vectors;
	vf->vsi->nb_used_qps = vf->vsi->nb_qps;

	/* Don't care how the TX/RX queues are mapped to this vector;
	 * link all VF RX queues together and only do the mapping work.
	 * The VF can enable/disable the interrupt by itself.
	 */
	i40e_vsi_queues_bind_intr(vf->vsi);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    ret, NULL, 0);

	return ret;
}
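
/**
 * Enable or disable the selected VF RX/TX queues. RX queues are
 * always enabled first and disabled last.
 **/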
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct i40e_virtchnl_queue_select *qsel,
			   bool on)
{
	int ret = I40E_SUCCESS;
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t baseq = vf->vsi->base_queue;

	if (qsel->rx_queues + qsel->tx_queues == 0)
		return I40E_ERR_PARAM;

	/* always enable RX first and disable last */
	/* Enable RX first if enabling */
	if (on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	/* Enable/Disable TX */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
		if (qsel->tx_queues & (1 << i)) {
			ret = i40e_switch_tx_queue(hw, baseq + i, on);
			if (ret != I40E_SUCCESS)
				return ret;
		}

	/* Disable RX last if disabling */
	if (!on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	return ret;
}

static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
				       uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_queue_select *q_sel =
		(struct i40e_virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_queue_select *q_sel =
		(struct i40e_virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
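
/**
 * Add the unicast MAC addresses listed in the VF message as
 * perfect-match filters on the VF's VSI.
 **/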
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_ether_addr_list *addr_list =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_mac_filter_info filter;
	int i;
	struct ether_addr *mac;

	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		if (!is_valid_assigned_ether_addr(mac) ||
		    i40e_vsi_add_mac(vf->vsi, &filter)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_ether_addr_list *addr_list =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (!is_valid_assigned_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "add_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;
	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}
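
/**
 * Configure unicast/multicast promiscuous mode on the VF's VSI.
 **/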
static int
i40e_pf_host_process_cmd_config_promisc_mode(struct i40e_pf_vf *vf,
					     uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_promisc_info *promisc =
		(struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	bool unicast = FALSE, multicast = FALSE;

	if (msg == NULL || msglen != sizeof(*promisc)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		unicast = TRUE;
	ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
			vf->vsi->seid, unicast, NULL, true);
	if (ret != I40E_SUCCESS)
		goto send_msg;

	if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		multicast = TRUE;
	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
						    multicast, NULL);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf,
		I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

	return ret;
}

static void
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
{
	i40e_update_vsi_stats(vf->vsi);

	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
				    I40E_SUCCESS,
				    (uint8_t *)&vf->vsi->eth_stats,
				    sizeof(vf->vsi->eth_stats));
}

static int
i40e_pf_host_process_cmd_cfg_vlan_offload(struct i40e_pf_vf *vf,
					  uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_offload_info *offload =
		(struct i40e_virtchnl_vlan_offload_info *)msg;

	if (msg == NULL || msglen != sizeof(*offload)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi,
					     !!offload->enable_vlan_strip);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_pvid_info *tpid_info =
		(struct i40e_virtchnl_pvid_info *)msg;

	if (msg == NULL || msglen != sizeof(*tpid_info)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
				    ret, NULL, 0);

	return ret;
}
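
/**
 * Send a link status change event to the VF. The event is only
 * sent when the VF's admin queue is active, so a VF that has not
 * set up its mailbox is not signaled.
 **/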
static void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_virtchnl_pf_event event;
	uint16_t vf_id = vf->vf_idx;
	uint32_t tval, rval;

	event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	event.event_data.link_event.link_status =
		dev->data->dev_link.link_status;

	/* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
	switch (dev->data->dev_link.link_speed) {
	case ETH_SPEED_NUM_100M:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
		break;
	case ETH_SPEED_NUM_1G:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
		break;
	case ETH_SPEED_NUM_10G:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
		break;
	case ETH_SPEED_NUM_20G:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
		break;
	case ETH_SPEED_NUM_25G:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
		break;
	case ETH_SPEED_NUM_40G:
		event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	default:
		event.event_data.link_event.link_speed =
			I40E_LINK_SPEED_UNKNOWN;
		break;
	}

	tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
	rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));

	if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
	    tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
	    rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
	    rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
		i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
			I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
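
/**
 * Dispatch a virtchnl message received from a VF to the matching
 * command handler.
 **/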
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg, uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ will pass absolute VF id, transfer to internal vf id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
			I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}

	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
		i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
							       msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
		i40e_notify_vf_link_status(dev, vf);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
		i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
		i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
		break;
	/* Don't add commands below; unsupported opcodes will
	 * return an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}

int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * return if SRIOV not enabled, VF number not configured or
	 * no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store VF structure */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}

int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * return if SRIOV not enabled, VF number not configured or
	 * no queue assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
	    (pf->vf_num == 0) ||
	    (pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free the memory storing the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}