Imported Upstream version 17.05.2
[deb_dpdk.git] / drivers / net / i40e / i40e_pf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50
51 #include "i40e_logs.h"
52 #include "base/i40e_prototype.h"
53 #include "base/i40e_adminq_cmd.h"
54 #include "base/i40e_type.h"
55 #include "i40e_ethdev.h"
56 #include "i40e_rxtx.h"
57 #include "i40e_pf.h"
58 #include "rte_pmd_i40e.h"
59
60 #define I40E_CFG_CRCSTRIP_DEFAULT 1
61
62 static int
63 i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
64                            struct i40e_virtchnl_queue_select *qsel,
65                            bool on);
66
/**
 * Bind PF queues with VSI and VF.
 *
 * Programs the HW queue-mapping registers so that the contiguous PF queue
 * range reserved for this VF (vsi->base_queue .. base_queue + nb_qps - 1)
 * is reachable both through the VF's VPLAN_QTABLE and through the VSI's
 * VSILAN_QTABLE.  Always returns I40E_SUCCESS.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id  = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase  = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * VF should use scatter range queues. So, it needn't
	 * to set QBASE in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Set to enable VFLAN_QTABLE[] registers valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF: entry i holds the absolute PF queue id */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI: each VSILAN_QTABLE register carries two
	 * queue indexes; slots beyond nb_qps are filled with the all-ones
	 * index mask to mark them unused.
	 */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
118
119
/**
 * Proceed VF reset operation.
 *
 * Optionally triggers a SW-initiated VF reset in HW (@do_hw_reset), tears
 * down the VF's previous state (queues, interrupt settings, VSI), waits for
 * the VF's pending PCI transactions to drain, then re-creates the VF VSI
 * and queue mapping.  Progress is reported to the VF through the
 * VFGEN_RSTAT1 register (INPROGRESS -> COMPLETED -> VFACTIVE).
 *
 * Returns I40E_SUCCESS on success; -EINVAL on a NULL @vf, -ETIMEDOUT when
 * a HW wait expires, -EFAULT on queue/VSI setup or teardown failure.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);

	/*
	 * If require a SW VF reset, a VFLR interrupt will be generated,
	 * this function will be called again. To avoid it,
	 * disable interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		/* set the SW reset trigger bit for this VF */
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);

#define VFRESET_MAX_WAIT_CNT 100
		/* Wait until VF reset is done (polled, 10us per attempt) */
		for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
			rte_delay_us(10);
			val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
			if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
				break;
		}

		if (i >= VFRESET_MAX_WAIT_CNT) {
			PMD_DRV_LOG(ERR, "VF reset timeout");
			return -ETIMEDOUT;
		}
		vf->state = I40E_VF_ACTIVE;
	}
	/* This is not first time to do reset, do cleanup job first */
	if (vf->vsi) {
		/* Disable queues: build a select mask covering every qp */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting: vector 0 (misc) uses
		 * DYN_CTL0; the remaining vectors index the per-VF slice
		 * of the DYN_CTLN array.
		 */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF */
	/* Use absolute VF id, refer to datasheet for details */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		(abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -ETIMEDOUT;
	}

	/* Reset done, Set COMPLETE flag and clear reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	vf->reset_cnt++;
	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again: a VF placed on a floating VEB gets a
	 * VSI with no uplink to the main VSI.
	 */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	/* Tell the VF it may resume operation */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);

	return ret;
}
255
256 int
257 i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
258                             uint32_t opcode,
259                             uint32_t retval,
260                             uint8_t *msg,
261                             uint16_t msglen)
262 {
263         struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
264         uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
265         int ret = I40E_ERR_ADMIN_QUEUE_ERROR;
266
267         if (vf->state == I40E_VF_INACTIVE)
268                 return ret;
269
270         ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
271                                                 msg, msglen, NULL);
272         if (ret) {
273                 PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
274                              hw->aq.asq_last_status);
275         }
276
277         return ret;
278 }
279
280 static void
281 i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
282 {
283         struct i40e_virtchnl_version_info info;
284
285         /* Respond like a Linux PF host in order to support both DPDK VF and
286          * Linux VF driver. The expense is original DPDK host specific feature
287          * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available.
288          *
289          * DPDK VF also can't identify host driver by version number returned.
290          * It always assume talking with Linux PF.
291          */
292         info.major = I40E_VIRTCHNL_VERSION_MAJOR;
293         info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
294
295         if (b_op)
296                 i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
297                                             I40E_SUCCESS,
298                                             (uint8_t *)&info,
299                                             sizeof(info));
300         else
301                 i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
302                                             I40E_NOT_SUPPORTED,
303                                             (uint8_t *)&info,
304                                             sizeof(info));
305 }
306
307 static int
308 i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
309 {
310         i40e_pf_host_vf_reset(vf, 1);
311
312         /* No feedback will be sent to VF for VFLR */
313         return I40E_SUCCESS;
314 }
315
316 static int
317 i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
318 {
319         struct i40e_virtchnl_vf_resource *vf_res = NULL;
320         struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
321         uint32_t len = 0;
322         int ret = I40E_SUCCESS;
323
324         if (!b_op) {
325                 i40e_pf_host_send_msg_to_vf(vf,
326                                             I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
327                                             I40E_NOT_SUPPORTED, NULL, 0);
328                 return ret;
329         }
330
331         /* only have 1 VSI by default */
332         len =  sizeof(struct i40e_virtchnl_vf_resource) +
333                                 I40E_DEFAULT_VF_VSI_NUM *
334                 sizeof(struct i40e_virtchnl_vsi_resource);
335
336         vf_res = rte_zmalloc("i40e_vf_res", len, 0);
337         if (vf_res == NULL) {
338                 PMD_DRV_LOG(ERR, "failed to allocate mem");
339                 ret = I40E_ERR_NO_MEMORY;
340                 vf_res = NULL;
341                 len = 0;
342                 goto send_msg;
343         }
344
345         vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
346                                 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
347         vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
348         vf_res->num_queue_pairs = vf->vsi->nb_qps;
349         vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
350
351         /* Change below setting if PF host can support more VSIs for VF */
352         vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
353         vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
354         vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
355         ether_addr_copy(&vf->mac_addr,
356                 (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
357
358 send_msg:
359         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
360                                         ret, (uint8_t *)vf_res, len);
361         rte_free(vf_res);
362
363         return ret;
364 }
365
/**
 * Program the HMC RX queue context for one VF RX queue.
 *
 * @param hw       PF hardware struct.
 * @param vf       Owning VF; used to translate the VF-relative queue id
 *                 into the absolute PF queue id.
 * @param rxq      RX queue parameters received from the VF via virtchnl.
 * @param crcstrip CRC-strip setting to program; the plain CONFIG_VSI_QUEUES
 *                 path passes the driver default, the _EXT path passes the
 *                 VF's choice.
 *
 * @return I40E_SUCCESS on success, an i40e error code from the HMC
 *         helpers otherwise.
 */
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* buffer sizes and ring base are stored in HW units, hence shifts */
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	/* 32-byte descriptor format */
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;
	rx_ctx.l2tsel = 1;
	rx_ctx.prefena = 1;

	/* clear any stale context before writing the new one */
	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}
410
411 static inline uint8_t
412 i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
413                 uint16_t queue_id)
414 {
415         struct i40e_aqc_vsi_properties_data *info = &vsi->info;
416         uint16_t bsf, qp_idx;
417         uint8_t i;
418
419         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
420                 if (vsi->enabled_tc & (1 << i)) {
421                         qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
422                                 I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
423                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
424                         bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
425                                 I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
426                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
427                         if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
428                                 return i;
429                 }
430         }
431         return 0;
432 }
433
/**
 * Program the HMC TX queue context for one VF TX queue and bind the
 * queue to its VF in the QTX_CTL register.
 *
 * @param hw  PF hardware struct.
 * @param vf  Owning VF; used for the absolute queue id and the VFVM index.
 * @param txq TX queue parameters received from the VF via virtchnl.
 *
 * @return I40E_SUCCESS on success, an i40e error code from the HMC
 *         helpers otherwise.
 */
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi = vf->vsi;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
	uint8_t dcb_tc;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	/* use the QS handle of the TC this queue is mapped to */
	dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
	tx_ctx.head_wb_ena = txq->headwb_enabled;
	tx_ctx.head_wb_addr = txq->dma_headwb_addr;

	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* bind queue with VF function, since TX/QX will appear in pair,
	 * so only has QTX_CTL to set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
				((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
				I40E_QTX_CTL_PF_INDX_MASK) |
				(((vf->vf_idx + hw->func_caps.vf_base_id) <<
				I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
477
/**
 * Handle I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES.
 *
 * Validates the variable-length queue-pair array carried in @msg and
 * programs the HMC RX/TX queue contexts for every pair, using the
 * driver-default CRC-strip setting.  A reply carrying the resulting
 * status is always sent back to the VF.
 *
 * @param vf     Requesting VF.
 * @param msg    struct i40e_virtchnl_vsi_queue_config_info payload.
 * @param msglen Length of @msg in bytes.
 * @param b_op   False when the op is administratively blocked; only a
 *               NOT_SUPPORTED reply is sent.
 */
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* reject NULL/short buffers and queue counts beyond the VSI or HW
	 * limit before touching the variable-length qpair array
	 */
	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
		vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
		msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		/* per-pair queue ids must be within the VSI's range */
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
			vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
			&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
							ret, NULL, 0);

	return ret;
}
544
/**
 * Handle I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT (DPDK-host extension).
 *
 * Same as the plain CONFIG_VSI_QUEUES handler, but each queue pair
 * carries extra per-queue info, allowing the VF to choose its own
 * CRC-strip setting.  A reply with the resulting status is always sent.
 *
 * @param vf     Requesting VF.
 * @param msg    struct i40e_virtchnl_vsi_queue_config_ext_info payload.
 * @param msglen Length of @msg in bytes.
 * @param b_op   False when the op is administratively blocked; only a
 *               NOT_SUPPORTED reply is sent.
 */
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
					       uint8_t *msg,
					       uint16_t msglen,
					       bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
		(struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* reject NULL/short buffers and out-of-range queue counts before
	 * touching the variable-length qpair array
	 */
	if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
		vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
		msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
					vc_vqcei->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpei = vc_vqcei->qpair;
	for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
		if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
			vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
			vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
							I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
								ret, NULL, 0);

	return ret;
}
611
612 static void
613 i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
614                               struct i40e_virtchnl_vector_map *vvm)
615 {
616 #define BITS_PER_CHAR 8
617         uint64_t linklistmap = 0, tempmap;
618         struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
619         uint16_t qid;
620         bool b_first_q = true;
621         enum i40e_queue_type qtype;
622         uint16_t vector_id;
623         uint32_t reg, reg_idx;
624         uint16_t itr_idx = 0, i;
625
626         vector_id = vvm->vector_id;
627         /* setup the head */
628         if (!vector_id)
629                 reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
630         else
631                 reg_idx = I40E_VPINT_LNKLSTN(
632                 ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
633                 + (vector_id - 1));
634
635         if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
636                 I40E_WRITE_REG(hw, reg_idx,
637                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
638                 goto cfg_irq_done;
639         }
640
641         /* sort all rx and tx queues */
642         tempmap = vvm->rxq_map;
643         for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
644                 if (tempmap & 0x1)
645                         linklistmap |= (1 << (2 * i));
646                 tempmap >>= 1;
647         }
648
649         tempmap = vvm->txq_map;
650         for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
651                 if (tempmap & 0x1)
652                         linklistmap |= (1 << (2 * i + 1));
653                 tempmap >>= 1;
654         }
655
656         /* Link all rx and tx queues into a chained list */
657         tempmap = linklistmap;
658         i = 0;
659         b_first_q = true;
660         do {
661                 if (tempmap & 0x1) {
662                         qtype = (enum i40e_queue_type)(i % 2);
663                         qid = vf->vsi->base_queue + i / 2;
664                         if (b_first_q) {
665                                 /* This is header */
666                                 b_first_q = false;
667                                 reg = ((qtype <<
668                                 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
669                                 | qid);
670                         } else {
671                                 /* element in the link list */
672                                 reg = (vector_id) |
673                                 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
674                                 (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
675                                 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
676                                 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
677                         }
678                         I40E_WRITE_REG(hw, reg_idx, reg);
679                         /* find next register to program */
680                         switch (qtype) {
681                         case I40E_QUEUE_TYPE_RX:
682                                 reg_idx = I40E_QINT_RQCTL(qid);
683                                 itr_idx = vvm->rxitr_idx;
684                                 break;
685                         case I40E_QUEUE_TYPE_TX:
686                                 reg_idx = I40E_QINT_TQCTL(qid);
687                                 itr_idx = vvm->txitr_idx;
688                                 break;
689                         default:
690                                 break;
691                         }
692                 }
693                 i++;
694                 tempmap >>= 1;
695         } while (tempmap);
696
697         /* Terminate the link list */
698         reg = (vector_id) |
699                 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
700                 (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
701                 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
702                 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
703         I40E_WRITE_REG(hw, reg_idx, reg);
704
705 cfg_irq_done:
706         I40E_WRITE_FLUSH(hw);
707 }
708
/**
 * Handle I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP.
 *
 * The PF host supports both DPDK and Linux VF drivers and distinguishes
 * them by the number of vectors requested: a DPDK VF asks for exactly one
 * vector (all queues are bound to it), while a Linux VF sends one
 * vector_map per vector, each of which is validated and programmed into
 * the HW interrupt link list.  A reply with the resulting status is
 * always sent back to the VF.
 *
 * @param vf     Requesting VF.
 * @param msg    struct i40e_virtchnl_irq_map_info payload.
 * @param msglen Length of @msg in bytes.
 * @param b_op   False when the op is administratively blocked; only a
 *               NOT_SUPPORTED reply is sent.
 */
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_virtchnl_irq_map_info *irqmap =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	int i;
	uint16_t vector_id;
	unsigned long qbit_max;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* PF host will support both DPDK VF or Linux VF driver, identify by
	 * number of vectors requested.
	 */

	/* DPDK VF only requires single vector */
	if (irqmap->num_vectors == 1) {
		/* This MSIX intr store the intr in VF range */
		vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
		vf->vsi->nb_msix = irqmap->num_vectors;
		vf->vsi->nb_used_qps = vf->vsi->nb_qps;

		/* Don't care how the TX/RX queue mapping with this vector.
		 * Link all VF RX queues together. Only did mapping work.
		 * VF can disable/enable the intr by itself.
		 */
		i40e_vsi_queues_bind_intr(vf->vsi);
		goto send_msg;
	}

	/* Then, it's Linux VF driver */
	qbit_max = 1 << pf->vf_nb_qp_max;
	for (i = 0; i < irqmap->num_vectors; i++) {
		map = &irqmap->vecmap[i];

		vector_id = map->vector_id;
		/* validate msg params */
		if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* queue bitmaps must stay below 2^vf_nb_qp_max */
		if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
			i40e_pf_config_irq_link_list(vf, map);
		} else {
			/* configured queue size excceed limit */
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
							ret, NULL, 0);

	return ret;
}
784
785 static int
786 i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
787                            struct i40e_virtchnl_queue_select *qsel,
788                            bool on)
789 {
790         int ret = I40E_SUCCESS;
791         int i;
792         struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
793         uint16_t baseq = vf->vsi->base_queue;
794
795         if (qsel->rx_queues + qsel->tx_queues == 0)
796                 return I40E_ERR_PARAM;
797
798         /* always enable RX first and disable last */
799         /* Enable RX if it's enable */
800         if (on) {
801                 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
802                         if (qsel->rx_queues & (1 << i)) {
803                                 ret = i40e_switch_rx_queue(hw, baseq + i, on);
804                                 if (ret != I40E_SUCCESS)
805                                         return ret;
806                         }
807         }
808
809         /* Enable/Disable TX */
810         for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
811                 if (qsel->tx_queues & (1 << i)) {
812                         ret = i40e_switch_tx_queue(hw, baseq + i, on);
813                         if (ret != I40E_SUCCESS)
814                                 return ret;
815                 }
816
817         /* disable RX last if it's disable */
818         if (!on) {
819                 /* disable RX */
820                 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
821                         if (qsel->rx_queues & (1 << i)) {
822                                 ret = i40e_switch_rx_queue(hw, baseq + i, on);
823                                 if (ret != I40E_SUCCESS)
824                                         return ret;
825                         }
826         }
827
828         return ret;
829 }
830
831 static int
832 i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
833                                        uint8_t *msg,
834                                        uint16_t msglen)
835 {
836         int ret = I40E_SUCCESS;
837         struct i40e_virtchnl_queue_select *q_sel =
838                 (struct i40e_virtchnl_queue_select *)msg;
839
840         if (msg == NULL || msglen != sizeof(*q_sel)) {
841                 ret = I40E_ERR_PARAM;
842                 goto send_msg;
843         }
844         ret = i40e_pf_host_switch_queues(vf, q_sel, true);
845
846 send_msg:
847         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
848                                                         ret, NULL, 0);
849
850         return ret;
851 }
852
853 static int
854 i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
855                                         uint8_t *msg,
856                                         uint16_t msglen,
857                                         bool b_op)
858 {
859         int ret = I40E_SUCCESS;
860         struct i40e_virtchnl_queue_select *q_sel =
861                 (struct i40e_virtchnl_queue_select *)msg;
862
863         if (!b_op) {
864                 i40e_pf_host_send_msg_to_vf(
865                         vf,
866                         I40E_VIRTCHNL_OP_DISABLE_QUEUES,
867                         I40E_NOT_SUPPORTED, NULL, 0);
868                 return ret;
869         }
870
871         if (msg == NULL || msglen != sizeof(*q_sel)) {
872                 ret = I40E_ERR_PARAM;
873                 goto send_msg;
874         }
875         ret = i40e_pf_host_switch_queues(vf, q_sel, false);
876
877 send_msg:
878         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
879                                                         ret, NULL, 0);
880
881         return ret;
882 }
883
884
885 static int
886 i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
887                                            uint8_t *msg,
888                                            uint16_t msglen,
889                                            bool b_op)
890 {
891         int ret = I40E_SUCCESS;
892         struct i40e_virtchnl_ether_addr_list *addr_list =
893                         (struct i40e_virtchnl_ether_addr_list *)msg;
894         struct i40e_mac_filter_info filter;
895         int i;
896         struct ether_addr *mac;
897
898         if (!b_op) {
899                 i40e_pf_host_send_msg_to_vf(
900                         vf,
901                         I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
902                         I40E_NOT_SUPPORTED, NULL, 0);
903                 return ret;
904         }
905
906         memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
907
908         if (msg == NULL || msglen <= sizeof(*addr_list)) {
909                 PMD_DRV_LOG(ERR, "add_ether_address argument too short");
910                 ret = I40E_ERR_PARAM;
911                 goto send_msg;
912         }
913
914         for (i = 0; i < addr_list->num_elements; i++) {
915                 mac = (struct ether_addr *)(addr_list->list[i].addr);
916                 (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
917                 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
918                 if (is_zero_ether_addr(mac) ||
919                     i40e_vsi_add_mac(vf->vsi, &filter)) {
920                         ret = I40E_ERR_INVALID_MAC_ADDR;
921                         goto send_msg;
922                 }
923         }
924
925 send_msg:
926         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
927                                                         ret, NULL, 0);
928
929         return ret;
930 }
931
932 static int
933 i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
934                                            uint8_t *msg,
935                                            uint16_t msglen,
936                                            bool b_op)
937 {
938         int ret = I40E_SUCCESS;
939         struct i40e_virtchnl_ether_addr_list *addr_list =
940                 (struct i40e_virtchnl_ether_addr_list *)msg;
941         int i;
942         struct ether_addr *mac;
943
944         if (!b_op) {
945                 i40e_pf_host_send_msg_to_vf(
946                         vf,
947                         I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
948                         I40E_NOT_SUPPORTED, NULL, 0);
949                 return ret;
950         }
951
952         if (msg == NULL || msglen <= sizeof(*addr_list)) {
953                 PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
954                 ret = I40E_ERR_PARAM;
955                 goto send_msg;
956         }
957
958         for (i = 0; i < addr_list->num_elements; i++) {
959                 mac = (struct ether_addr *)(addr_list->list[i].addr);
960                 if(is_zero_ether_addr(mac) ||
961                         i40e_vsi_delete_mac(vf->vsi, mac)) {
962                         ret = I40E_ERR_INVALID_MAC_ADDR;
963                         goto send_msg;
964                 }
965         }
966
967 send_msg:
968         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
969                                                         ret, NULL, 0);
970
971         return ret;
972 }
973
974 static int
975 i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
976                                 uint8_t *msg, uint16_t msglen,
977                                 bool b_op)
978 {
979         int ret = I40E_SUCCESS;
980         struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
981                 (struct i40e_virtchnl_vlan_filter_list *)msg;
982         int i;
983         uint16_t *vid;
984
985         if (!b_op) {
986                 i40e_pf_host_send_msg_to_vf(
987                         vf,
988                         I40E_VIRTCHNL_OP_ADD_VLAN,
989                         I40E_NOT_SUPPORTED, NULL, 0);
990                 return ret;
991         }
992
993         if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
994                 PMD_DRV_LOG(ERR, "add_vlan argument too short");
995                 ret = I40E_ERR_PARAM;
996                 goto send_msg;
997         }
998
999         vid = vlan_filter_list->vlan_id;
1000
1001         for (i = 0; i < vlan_filter_list->num_elements; i++) {
1002                 ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
1003                 if(ret != I40E_SUCCESS)
1004                         goto send_msg;
1005         }
1006
1007 send_msg:
1008         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1009                                                 ret, NULL, 0);
1010
1011         return ret;
1012 }
1013
1014 static int
1015 i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
1016                                   uint8_t *msg,
1017                                   uint16_t msglen,
1018                                   bool b_op)
1019 {
1020         int ret = I40E_SUCCESS;
1021         struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
1022                         (struct i40e_virtchnl_vlan_filter_list *)msg;
1023         int i;
1024         uint16_t *vid;
1025
1026         if (!b_op) {
1027                 i40e_pf_host_send_msg_to_vf(
1028                         vf,
1029                         I40E_VIRTCHNL_OP_DEL_VLAN,
1030                         I40E_NOT_SUPPORTED, NULL, 0);
1031                 return ret;
1032         }
1033
1034         if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
1035                 PMD_DRV_LOG(ERR, "delete_vlan argument too short");
1036                 ret = I40E_ERR_PARAM;
1037                 goto send_msg;
1038         }
1039
1040         vid = vlan_filter_list->vlan_id;
1041         for (i = 0; i < vlan_filter_list->num_elements; i++) {
1042                 ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
1043                 if(ret != I40E_SUCCESS)
1044                         goto send_msg;
1045         }
1046
1047 send_msg:
1048         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1049                                                 ret, NULL, 0);
1050
1051         return ret;
1052 }
1053
1054 static int
1055 i40e_pf_host_process_cmd_config_promisc_mode(
1056                                         struct i40e_pf_vf *vf,
1057                                         uint8_t *msg,
1058                                         uint16_t msglen,
1059                                         bool b_op)
1060 {
1061         int ret = I40E_SUCCESS;
1062         struct i40e_virtchnl_promisc_info *promisc =
1063                                 (struct i40e_virtchnl_promisc_info *)msg;
1064         struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
1065         bool unicast = FALSE, multicast = FALSE;
1066
1067         if (!b_op) {
1068                 i40e_pf_host_send_msg_to_vf(
1069                         vf,
1070                         I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1071                         I40E_NOT_SUPPORTED, NULL, 0);
1072                 return ret;
1073         }
1074
1075         if (msg == NULL || msglen != sizeof(*promisc)) {
1076                 ret = I40E_ERR_PARAM;
1077                 goto send_msg;
1078         }
1079
1080         if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
1081                 unicast = TRUE;
1082         ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
1083                         vf->vsi->seid, unicast, NULL, true);
1084         if (ret != I40E_SUCCESS)
1085                 goto send_msg;
1086
1087         if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1088                 multicast = TRUE;
1089         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
1090                                                 multicast, NULL);
1091
1092 send_msg:
1093         i40e_pf_host_send_msg_to_vf(vf,
1094                 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
1095
1096         return ret;
1097 }
1098
1099 static int
1100 i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
1101 {
1102         i40e_update_vsi_stats(vf->vsi);
1103
1104         if (b_op)
1105                 i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
1106                                             I40E_SUCCESS,
1107                                             (uint8_t *)&vf->vsi->eth_stats,
1108                                             sizeof(vf->vsi->eth_stats));
1109         else
1110                 i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
1111                                             I40E_NOT_SUPPORTED,
1112                                             (uint8_t *)&vf->vsi->eth_stats,
1113                                             sizeof(vf->vsi->eth_stats));
1114
1115         return I40E_SUCCESS;
1116 }
1117
1118 static int
1119 i40e_pf_host_process_cmd_cfg_vlan_offload(
1120                                         struct i40e_pf_vf *vf,
1121                                         uint8_t *msg,
1122                                         uint16_t msglen,
1123                                         bool b_op)
1124 {
1125         int ret = I40E_SUCCESS;
1126         struct i40e_virtchnl_vlan_offload_info *offload =
1127                         (struct i40e_virtchnl_vlan_offload_info *)msg;
1128
1129         if (!b_op) {
1130                 i40e_pf_host_send_msg_to_vf(
1131                         vf,
1132                         I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
1133                         I40E_NOT_SUPPORTED, NULL, 0);
1134                 return ret;
1135         }
1136
1137         if (msg == NULL || msglen != sizeof(*offload)) {
1138                 ret = I40E_ERR_PARAM;
1139                 goto send_msg;
1140         }
1141
1142         ret = i40e_vsi_config_vlan_stripping(vf->vsi,
1143                                                 !!offload->enable_vlan_strip);
1144         if (ret != 0)
1145                 PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
1146
1147 send_msg:
1148         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
1149                                         ret, NULL, 0);
1150
1151         return ret;
1152 }
1153
1154 static int
1155 i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
1156                                         uint8_t *msg,
1157                                         uint16_t msglen,
1158                                         bool b_op)
1159 {
1160         int ret = I40E_SUCCESS;
1161         struct i40e_virtchnl_pvid_info  *tpid_info =
1162                         (struct i40e_virtchnl_pvid_info *)msg;
1163
1164         if (!b_op) {
1165                 i40e_pf_host_send_msg_to_vf(
1166                         vf,
1167                         I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
1168                         I40E_NOT_SUPPORTED, NULL, 0);
1169                 return ret;
1170         }
1171
1172         if (msg == NULL || msglen != sizeof(*tpid_info)) {
1173                 ret = I40E_ERR_PARAM;
1174                 goto send_msg;
1175         }
1176
1177         ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
1178
1179 send_msg:
1180         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
1181                                         ret, NULL, 0);
1182
1183         return ret;
1184 }
1185
1186 void
1187 i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
1188 {
1189         struct i40e_virtchnl_pf_event event;
1190
1191         event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1192         event.event_data.link_event.link_status =
1193                 dev->data->dev_link.link_status;
1194
1195         /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
1196         switch (dev->data->dev_link.link_speed) {
1197         case ETH_SPEED_NUM_100M:
1198                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
1199                 break;
1200         case ETH_SPEED_NUM_1G:
1201                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
1202                 break;
1203         case ETH_SPEED_NUM_10G:
1204                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
1205                 break;
1206         case ETH_SPEED_NUM_20G:
1207                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
1208                 break;
1209         case ETH_SPEED_NUM_25G:
1210                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
1211                 break;
1212         case ETH_SPEED_NUM_40G:
1213                 event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
1214                 break;
1215         default:
1216                 event.event_data.link_event.link_speed =
1217                         I40E_LINK_SPEED_UNKNOWN;
1218                 break;
1219         }
1220
1221         i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
1222                 I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
1223 }
1224
/**
 * Entry point for all mailbox messages arriving from a VF via the AdminQ.
 *
 * Validates the VF id and its VSI, gives the user application a chance to
 * veto the operation through the RTE_ETH_EVENT_VF_MBOX callback, then
 * dispatches on the virtchnl opcode to the matching handler above. Every
 * path ends with a response message to the VF (each handler sends its
 * own ACK/NACK; unsupported opcodes are NACKed here with I40E_ERR_PARAM).
 */
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg,
			   uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ will pass absolute VF id, transfer to internal vf id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
	struct rte_pmd_i40e_mb_event_param cb_param;
	bool b_op = TRUE;

	/* reject out-of-range VF ids and messages before VF init */
	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
			I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}

	/**
	 * initialise structure to send to user application
	 * will return response from user in retval field
	 */
	cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	cb_param.vfid = vf_id;
	cb_param.msg_type = opcode;
	cb_param.msg = (void *)msg;
	cb_param.msglen = msglen;

	/**
	 * Ask user application if we're allowed to perform those functions.
	 * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
	 * then business as usual.
	 * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
	 * do nothing and send not_supported to VF. As PF must send a response
	 * to VF and ACK/NACK is not defined.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
	if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
			    opcode);
		b_op = FALSE;
	}

	/* dispatch on opcode; b_op=FALSE makes each handler NACK the VF */
	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION :
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF :
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		/* reset is always honored, regardless of b_op */
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
		i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
							       msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		if (b_op) {
			i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
			/* VF just came up: let it know the current link */
			i40e_notify_vf_link_status(dev, vf);
		} else {
			i40e_pf_host_send_msg_to_vf(
				vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				I40E_NOT_SUPPORTED, NULL, 0);
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
							     msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
		i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
							  msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
		i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
		break;
	/* Don't add command supported below, which will
	 * return an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
								NULL, 0);
		break;
	}
}
1366
1367 int
1368 i40e_pf_host_init(struct rte_eth_dev *dev)
1369 {
1370         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1371         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1372         int ret, i;
1373         uint32_t val;
1374
1375         PMD_INIT_FUNC_TRACE();
1376
1377         /**
1378          * return if SRIOV not enabled, VF number not configured or
1379          * no queue assigned.
1380          */
1381         if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
1382                 return I40E_SUCCESS;
1383
1384         /* Allocate memory to store VF structure */
1385         pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0);
1386         if(pf->vfs == NULL)
1387                 return -ENOMEM;
1388
1389         /* Disable irq0 for VFR event */
1390         i40e_pf_disable_irq0(hw);
1391
1392         /* Disable VF link status interrupt */
1393         val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
1394         val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
1395         I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
1396         I40E_WRITE_FLUSH(hw);
1397
1398         for (i = 0; i < pf->vf_num; i++) {
1399                 pf->vfs[i].pf = pf;
1400                 pf->vfs[i].state = I40E_VF_INACTIVE;
1401                 pf->vfs[i].vf_idx = i;
1402                 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
1403                 if (ret != I40E_SUCCESS)
1404                         goto fail;
1405         }
1406
1407         RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
1408         /* restore irq0 */
1409         i40e_pf_enable_irq0(hw);
1410
1411         return I40E_SUCCESS;
1412
1413 fail:
1414         rte_free(pf->vfs);
1415         i40e_pf_enable_irq0(hw);
1416
1417         return ret;
1418 }
1419
1420 int
1421 i40e_pf_host_uninit(struct rte_eth_dev *dev)
1422 {
1423         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1424         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1425         uint32_t val;
1426
1427         PMD_INIT_FUNC_TRACE();
1428
1429         /**
1430          * return if SRIOV not enabled, VF number not configured or
1431          * no queue assigned.
1432          */
1433         if ((!hw->func_caps.sr_iov_1_1) ||
1434                 (pf->vf_num == 0) ||
1435                 (pf->vf_nb_qps == 0))
1436                 return I40E_SUCCESS;
1437
1438         /* free memory to store VF structure */
1439         rte_free(pf->vfs);
1440         pf->vfs = NULL;
1441
1442         /* Disable irq0 for VFR event */
1443         i40e_pf_disable_irq0(hw);
1444
1445         /* Disable VF link status interrupt */
1446         val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
1447         val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
1448         I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
1449         I40E_WRITE_FLUSH(hw);
1450
1451         return I40E_SUCCESS;
1452 }