f6d8293473b6b623214280ebf7efae67c5ae4a5f
[deb_dpdk.git] / drivers / net / i40e / i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_ethdev_pci.h>
59 #include <rte_malloc.h>
60 #include <rte_dev.h>
61
62 #include "i40e_logs.h"
63 #include "base/i40e_prototype.h"
64 #include "base/i40e_adminq_cmd.h"
65 #include "base/i40e_type.h"
66
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
69 #include "i40e_pf.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR     1
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
77
/* Bookkeeping for one message exchanged with the PF over the admin queue. */
struct i40evf_arq_msg_info {
	enum virtchnl_ops ops;        /* opcode the PF is replying to */
	enum i40e_status_code result; /* status code carried in the reply */
	uint16_t buf_len;             /* capacity of 'msg' in bytes */
	uint16_t msg_len;             /* actual payload length read from the PF */
	uint8_t *msg;                 /* caller-supplied receive buffer */
};

/* Description of one virtchnl command to issue to the PF. */
struct vf_cmd_info {
	enum virtchnl_ops ops;   /* virtchnl opcode to send */
	uint8_t *in_args;        /* request payload (may be NULL) */
	uint32_t in_args_size;   /* request payload length in bytes */
	uint8_t *out_buffer;     /* buffer that receives the PF's reply */
	/* Input & output type. pass in buffer size and pass out
	 * actual return result
	 */
	uint32_t out_size;
};

/* Outcome classification for one admin-queue read attempt. */
enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
	I40EVF_MSG_NON,      /* Read nothing from admin queue */
	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
	I40EVF_MSG_CMD,      /* Read async command result */
};
103
104 static int i40evf_dev_configure(struct rte_eth_dev *dev);
105 static int i40evf_dev_start(struct rte_eth_dev *dev);
106 static void i40evf_dev_stop(struct rte_eth_dev *dev);
107 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
108                                 struct rte_eth_dev_info *dev_info);
109 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
110                                   int wait_to_complete);
111 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
112                                 struct rte_eth_stats *stats);
113 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
114                                  struct rte_eth_xstat *xstats, unsigned n);
115 static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
116                                        struct rte_eth_xstat_name *xstats_names,
117                                        unsigned limit);
118 static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
119 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
120                                   uint16_t vlan_id, int on);
121 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
122 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
123                                 int on);
124 static void i40evf_dev_close(struct rte_eth_dev *dev);
125 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
126 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
127 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
128 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
129 static int i40evf_init_vlan(struct rte_eth_dev *dev);
130 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
131                                      uint16_t rx_queue_id);
132 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
133                                     uint16_t rx_queue_id);
134 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
135                                      uint16_t tx_queue_id);
136 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
137                                     uint16_t tx_queue_id);
138 static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
139                                struct ether_addr *addr,
140                                uint32_t index,
141                                uint32_t pool);
142 static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
143 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
144                         struct rte_eth_rss_reta_entry64 *reta_conf,
145                         uint16_t reta_size);
146 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
147                         struct rte_eth_rss_reta_entry64 *reta_conf,
148                         uint16_t reta_size);
149 static int i40evf_config_rss(struct i40e_vf *vf);
150 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
151                                       struct rte_eth_rss_conf *rss_conf);
152 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
153                                         struct rte_eth_rss_conf *rss_conf);
154 static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
155 static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
156                                         struct ether_addr *mac_addr);
157 static int
158 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
159 static int
160 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
161 static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
162                                    uint8_t *msg,
163                                    uint16_t msglen);
164
165 /* Default hash key buffer for RSS */
166 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
167
/* Maps one exported xstat name to its byte offset in struct i40e_eth_stats. */
struct rte_i40evf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Extended-statistics exported by the VF; each entry points into the
 * i40e_eth_stats block fetched from the PF.
 */
static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
	{"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
};

/* Number of entries in the xstats table above. */
#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
		sizeof(rte_i40evf_stats_strings[0]))
191
/* ethdev callback table for the i40e VF driver.  Queue setup/teardown and
 * descriptor inspection are shared with the PF driver (i40e_dev_* symbols);
 * everything negotiated with the PF over virtchnl uses the i40evf_* entries.
 */
static const struct eth_dev_ops i40evf_eth_dev_ops = {
	.dev_configure        = i40evf_dev_configure,
	.dev_start            = i40evf_dev_start,
	.dev_stop             = i40evf_dev_stop,
	.promiscuous_enable   = i40evf_dev_promiscuous_enable,
	.promiscuous_disable  = i40evf_dev_promiscuous_disable,
	.allmulticast_enable  = i40evf_dev_allmulticast_enable,
	.allmulticast_disable = i40evf_dev_allmulticast_disable,
	.link_update          = i40evf_dev_link_update,
	.stats_get            = i40evf_dev_stats_get,
	.xstats_get           = i40evf_dev_xstats_get,
	.xstats_get_names     = i40evf_dev_xstats_get_names,
	.xstats_reset         = i40evf_dev_xstats_reset,
	.dev_close            = i40evf_dev_close,
	.dev_infos_get        = i40evf_dev_info_get,
	.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
	.vlan_filter_set      = i40evf_vlan_filter_set,
	.vlan_offload_set     = i40evf_vlan_offload_set,
	.vlan_pvid_set        = i40evf_vlan_pvid_set,
	.rx_queue_start       = i40evf_dev_rx_queue_start,
	.rx_queue_stop        = i40evf_dev_rx_queue_stop,
	.tx_queue_start       = i40evf_dev_tx_queue_start,
	.tx_queue_stop        = i40evf_dev_tx_queue_stop,
	.rx_queue_setup       = i40e_dev_rx_queue_setup,
	.rx_queue_release     = i40e_dev_rx_queue_release,
	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
	.rx_descriptor_done   = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status = i40e_dev_tx_descriptor_status,
	.tx_queue_setup       = i40e_dev_tx_queue_setup,
	.tx_queue_release     = i40e_dev_tx_queue_release,
	.rx_queue_count       = i40e_dev_rx_queue_count,
	.rxq_info_get         = i40e_rxq_info_get,
	.txq_info_get         = i40e_txq_info_get,
	.mac_addr_add         = i40evf_add_mac_addr,
	.mac_addr_remove      = i40evf_del_mac_addr,
	.reta_update          = i40evf_dev_rss_reta_update,
	.reta_query           = i40evf_dev_rss_reta_query,
	.rss_hash_update      = i40evf_dev_rss_hash_update,
	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
	.mtu_set              = i40evf_dev_mtu_set,
	.mac_addr_set         = i40evf_set_default_mac_addr,
};
236
237 /*
238  * Read data in admin queue to get msg from pf driver
239  */
/*
 * Read data in admin queue to get msg from pf driver
 *
 * Pulls at most one event off the admin receive queue into data->msg
 * (capacity data->buf_len).  System events (VIRTCHNL_OP_EVENT) update the
 * cached VF link/reset/close state and return I40EVF_MSG_SYS; anything else
 * is treated as an async reply to a previously issued command and returns
 * I40EVF_MSG_CMD with data->msg_len set to the payload length.  On every
 * successful read, data->ops/data->result carry the opcode and status taken
 * from the descriptor cookies.
 */
static enum i40evf_aq_result
i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info event;
	enum virtchnl_ops opcode;
	enum i40e_status_code retval;
	int ret;
	enum i40evf_aq_result result = I40EVF_MSG_NON;

	event.buf_len = data->buf_len;
	event.msg_buf = data->msg;
	ret = i40e_clean_arq_element(hw, &event, NULL);
	/* Can't read any msg from adminQ */
	if (ret) {
		/* "no work" just means the queue was empty (I40EVF_MSG_NON) */
		if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
			result = I40EVF_MSG_ERR;
		return result;
	}

	/* opcode/status are carried little-endian in the descriptor cookies */
	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
	retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
	/* pf sys event */
	if (opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)event.msg_buf;

		result = I40EVF_MSG_SYS;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			vf->link_up =
				vpe->event_data.link_event.link_status;
			vf->link_speed =
				vpe->event_data.link_event.link_speed;
			vf->pend_msg |= PFMSG_LINK_CHANGE;
			PMD_DRV_LOG(INFO, "Link status update:%s",
				    vf->link_up ? "up" : "down");
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			vf->vf_reset = true;
			vf->pend_msg |= PFMSG_RESET_IMPENDING;
			PMD_DRV_LOG(INFO, "vf is reseting");
			break;
		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
			vf->dev_closed = true;
			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
			PMD_DRV_LOG(INFO, "PF driver closed");
			break;
		default:
			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
				    __func__, vpe->event);
		}
	} else {
		/* async reply msg on command issued by vf previously */
		result = I40EVF_MSG_CMD;
		/* Actual data length read from PF */
		data->msg_len = event.msg_len;
	}

	data->result = retval;
	data->ops = opcode;

	return result;
}
305
306 /**
307  * clear current command. Only call in case execute
308  * _atomic_set_cmd successfully.
309  */
310 static inline void
311 _clear_cmd(struct i40e_vf *vf)
312 {
313         rte_wmb();
314         vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
315 }
316
317 /*
318  * Check there is pending cmd in execution. If none, set new command.
319  */
320 static inline int
321 _atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
322 {
323         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
324                         VIRTCHNL_OP_UNKNOWN, ops);
325
326         if (!ret)
327                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
328
329         return !ret;
330 }
331
332 #define MAX_TRY_TIMES 200
333 #define ASQ_DELAY_MS  10
334
335 static int
336 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
337 {
338         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
339         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
340         struct i40evf_arq_msg_info info;
341         enum i40evf_aq_result ret;
342         int err, i = 0;
343
344         if (_atomic_set_cmd(vf, args->ops))
345                 return -1;
346
347         info.msg = args->out_buffer;
348         info.buf_len = args->out_size;
349         info.ops = VIRTCHNL_OP_UNKNOWN;
350         info.result = I40E_SUCCESS;
351
352         err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
353                      args->in_args, args->in_args_size, NULL);
354         if (err) {
355                 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
356                 _clear_cmd(vf);
357                 return err;
358         }
359
360         switch (args->ops) {
361         case VIRTCHNL_OP_RESET_VF:
362                 /*no need to process in this function */
363                 err = 0;
364                 break;
365         case VIRTCHNL_OP_VERSION:
366         case VIRTCHNL_OP_GET_VF_RESOURCES:
367                 /* for init adminq commands, need to poll the response */
368                 err = -1;
369                 do {
370                         ret = i40evf_read_pfmsg(dev, &info);
371                         vf->cmd_retval = info.result;
372                         if (ret == I40EVF_MSG_CMD) {
373                                 err = 0;
374                                 break;
375                         } else if (ret == I40EVF_MSG_ERR)
376                                 break;
377                         rte_delay_ms(ASQ_DELAY_MS);
378                         /* If don't read msg or read sys event, continue */
379                 } while (i++ < MAX_TRY_TIMES);
380                 _clear_cmd(vf);
381                 break;
382
383         default:
384                 /* for other adminq in running time, waiting the cmd done flag */
385                 err = -1;
386                 do {
387                         if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
388                                 err = 0;
389                                 break;
390                         }
391                         rte_delay_ms(ASQ_DELAY_MS);
392                         /* If don't read msg or read sys event, continue */
393                 } while (i++ < MAX_TRY_TIMES);
394                 /* If there's no response is received, clear command */
395                 if (i >= MAX_TRY_TIMES) {
396                         PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
397                         _clear_cmd(vf);
398                 }
399                 break;
400         }
401
402         return err | vf->cmd_retval;
403 }
404
405 /*
406  * Check API version with sync wait until version read or fail from admin queue
407  */
408 static int
409 i40evf_check_api_version(struct rte_eth_dev *dev)
410 {
411         struct virtchnl_version_info version, *pver;
412         int err;
413         struct vf_cmd_info args;
414         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
415
416         version.major = VIRTCHNL_VERSION_MAJOR;
417         version.minor = VIRTCHNL_VERSION_MINOR;
418
419         args.ops = VIRTCHNL_OP_VERSION;
420         args.in_args = (uint8_t *)&version;
421         args.in_args_size = sizeof(version);
422         args.out_buffer = vf->aq_resp;
423         args.out_size = I40E_AQ_BUF_SZ;
424
425         err = i40evf_execute_vf_cmd(dev, &args);
426         if (err) {
427                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
428                 return err;
429         }
430
431         pver = (struct virtchnl_version_info *)args.out_buffer;
432         vf->version_major = pver->major;
433         vf->version_minor = pver->minor;
434         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
435                 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
436         else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
437                 (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
438                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
439         else {
440                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
441                                         vf->version_major, vf->version_minor,
442                                                 VIRTCHNL_VERSION_MAJOR,
443                                                 VIRTCHNL_VERSION_MINOR);
444                 return -1;
445         }
446
447         return 0;
448 }
449
450 static int
451 i40evf_get_vf_resource(struct rte_eth_dev *dev)
452 {
453         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
454         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
455         int err;
456         struct vf_cmd_info args;
457         uint32_t caps, len;
458
459         args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
460         args.out_buffer = vf->aq_resp;
461         args.out_size = I40E_AQ_BUF_SZ;
462         if (PF_IS_V11(vf)) {
463                 caps = VIRTCHNL_VF_OFFLOAD_L2 |
464                        VIRTCHNL_VF_OFFLOAD_RSS_AQ |
465                        VIRTCHNL_VF_OFFLOAD_RSS_REG |
466                        VIRTCHNL_VF_OFFLOAD_VLAN |
467                        VIRTCHNL_VF_OFFLOAD_RX_POLLING;
468                 args.in_args = (uint8_t *)&caps;
469                 args.in_args_size = sizeof(caps);
470         } else {
471                 args.in_args = NULL;
472                 args.in_args_size = 0;
473         }
474         err = i40evf_execute_vf_cmd(dev, &args);
475
476         if (err) {
477                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
478                 return err;
479         }
480
481         len =  sizeof(struct virtchnl_vf_resource) +
482                 I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
483
484         (void)rte_memcpy(vf->vf_res, args.out_buffer,
485                         RTE_MIN(args.out_size, len));
486         i40e_vf_parse_hw_config(hw, vf->vf_res);
487
488         return 0;
489 }
490
491 static int
492 i40evf_config_promisc(struct rte_eth_dev *dev,
493                       bool enable_unicast,
494                       bool enable_multicast)
495 {
496         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
497         int err;
498         struct vf_cmd_info args;
499         struct virtchnl_promisc_info promisc;
500
501         promisc.flags = 0;
502         promisc.vsi_id = vf->vsi_res->vsi_id;
503
504         if (enable_unicast)
505                 promisc.flags |= FLAG_VF_UNICAST_PROMISC;
506
507         if (enable_multicast)
508                 promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
509
510         args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
511         args.in_args = (uint8_t *)&promisc;
512         args.in_args_size = sizeof(promisc);
513         args.out_buffer = vf->aq_resp;
514         args.out_size = I40E_AQ_BUF_SZ;
515
516         err = i40evf_execute_vf_cmd(dev, &args);
517
518         if (err)
519                 PMD_DRV_LOG(ERR, "fail to execute command "
520                             "CONFIG_PROMISCUOUS_MODE");
521         return err;
522 }
523
524 static int
525 i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
526 {
527         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
528         struct vf_cmd_info args;
529         int ret;
530
531         memset(&args, 0, sizeof(args));
532         args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
533         args.in_args = NULL;
534         args.in_args_size = 0;
535         args.out_buffer = vf->aq_resp;
536         args.out_size = I40E_AQ_BUF_SZ;
537         ret = i40evf_execute_vf_cmd(dev, &args);
538         if (ret)
539                 PMD_DRV_LOG(ERR, "Failed to execute command of "
540                             "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
541
542         return ret;
543 }
544
545 static int
546 i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
547 {
548         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
549         struct vf_cmd_info args;
550         int ret;
551
552         memset(&args, 0, sizeof(args));
553         args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
554         args.in_args = NULL;
555         args.in_args_size = 0;
556         args.out_buffer = vf->aq_resp;
557         args.out_size = I40E_AQ_BUF_SZ;
558         ret = i40evf_execute_vf_cmd(dev, &args);
559         if (ret)
560                 PMD_DRV_LOG(ERR, "Failed to execute command of "
561                             "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
562
563         return ret;
564 }
565
566 static int
567 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
568                                 struct i40e_vsi_vlan_pvid_info *info)
569 {
570         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
571         int err;
572         struct vf_cmd_info args;
573         struct virtchnl_pvid_info tpid_info;
574
575         if (info == NULL) {
576                 PMD_DRV_LOG(ERR, "invalid parameters");
577                 return I40E_ERR_PARAM;
578         }
579
580         memset(&tpid_info, 0, sizeof(tpid_info));
581         tpid_info.vsi_id = vf->vsi_res->vsi_id;
582         (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
583
584         args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
585         args.in_args = (uint8_t *)&tpid_info;
586         args.in_args_size = sizeof(tpid_info);
587         args.out_buffer = vf->aq_resp;
588         args.out_size = I40E_AQ_BUF_SZ;
589
590         err = i40evf_execute_vf_cmd(dev, &args);
591         if (err)
592                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
593
594         return err;
595 }
596
597 static void
598 i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
599                                   uint16_t vsi_id,
600                                   uint16_t queue_id,
601                                   uint16_t nb_txq,
602                                   struct i40e_tx_queue *txq)
603 {
604         txq_info->vsi_id = vsi_id;
605         txq_info->queue_id = queue_id;
606         if (queue_id < nb_txq) {
607                 txq_info->ring_len = txq->nb_tx_desc;
608                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
609         }
610 }
611
612 static void
613 i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
614                                   uint16_t vsi_id,
615                                   uint16_t queue_id,
616                                   uint16_t nb_rxq,
617                                   uint32_t max_pkt_size,
618                                   struct i40e_rx_queue *rxq)
619 {
620         rxq_info->vsi_id = vsi_id;
621         rxq_info->queue_id = queue_id;
622         rxq_info->max_pkt_size = max_pkt_size;
623         if (queue_id < nb_rxq) {
624                 rxq_info->ring_len = rxq->nb_rx_desc;
625                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
626                 rxq_info->databuffer_size =
627                         (rte_pktmbuf_data_room_size(rxq->mp) -
628                                 RTE_PKTMBUF_HEADROOM);
629         }
630 }
631
632 /* It configures VSI queues to co-work with Linux PF host */
633 static int
634 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
635 {
636         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
637         struct i40e_rx_queue **rxq =
638                 (struct i40e_rx_queue **)dev->data->rx_queues;
639         struct i40e_tx_queue **txq =
640                 (struct i40e_tx_queue **)dev->data->tx_queues;
641         struct virtchnl_vsi_queue_config_info *vc_vqci;
642         struct virtchnl_queue_pair_info *vc_qpi;
643         struct vf_cmd_info args;
644         uint16_t i, nb_qp = vf->num_queue_pairs;
645         const uint32_t size =
646                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
647         uint8_t buff[size];
648         int ret;
649
650         memset(buff, 0, sizeof(buff));
651         vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
652         vc_vqci->vsi_id = vf->vsi_res->vsi_id;
653         vc_vqci->num_queue_pairs = nb_qp;
654
655         for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
656                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
657                         vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
658                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
659                         vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
660                                         vf->max_pkt_len, rxq[i]);
661         }
662         memset(&args, 0, sizeof(args));
663         args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
664         args.in_args = (uint8_t *)vc_vqci;
665         args.in_args_size = size;
666         args.out_buffer = vf->aq_resp;
667         args.out_size = I40E_AQ_BUF_SZ;
668         ret = i40evf_execute_vf_cmd(dev, &args);
669         if (ret)
670                 PMD_DRV_LOG(ERR, "Failed to execute command of "
671                         "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
672
673         return ret;
674 }
675
676 /* It configures VSI queues to co-work with DPDK PF host */
677 static int
678 i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
679 {
680         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
681         struct i40e_rx_queue **rxq =
682                 (struct i40e_rx_queue **)dev->data->rx_queues;
683         struct i40e_tx_queue **txq =
684                 (struct i40e_tx_queue **)dev->data->tx_queues;
685         struct virtchnl_vsi_queue_config_ext_info *vc_vqcei;
686         struct virtchnl_queue_pair_ext_info *vc_qpei;
687         struct vf_cmd_info args;
688         uint16_t i, nb_qp = vf->num_queue_pairs;
689         const uint32_t size =
690                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
691         uint8_t buff[size];
692         int ret;
693
694         memset(buff, 0, sizeof(buff));
695         vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff;
696         vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
697         vc_vqcei->num_queue_pairs = nb_qp;
698         vc_qpei = vc_vqcei->qpair;
699         for (i = 0; i < nb_qp; i++, vc_qpei++) {
700                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
701                         vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
702                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
703                         vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
704                                         vf->max_pkt_len, rxq[i]);
705                 if (i < dev->data->nb_rx_queues)
706                         /*
707                          * It adds extra info for configuring VSI queues, which
708                          * is needed to enable the configurable crc stripping
709                          * in VF.
710                          */
711                         vc_qpei->rxq_ext.crcstrip =
712                                 dev->data->dev_conf.rxmode.hw_strip_crc;
713         }
714         memset(&args, 0, sizeof(args));
715         args.ops =
716                 (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
717         args.in_args = (uint8_t *)vc_vqcei;
718         args.in_args_size = size;
719         args.out_buffer = vf->aq_resp;
720         args.out_size = I40E_AQ_BUF_SZ;
721         ret = i40evf_execute_vf_cmd(dev, &args);
722         if (ret)
723                 PMD_DRV_LOG(ERR, "Failed to execute command of "
724                         "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
725
726         return ret;
727 }
728
729 static int
730 i40evf_configure_queues(struct rte_eth_dev *dev)
731 {
732         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
733
734         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
735                 /* To support DPDK PF host */
736                 return i40evf_configure_vsi_queues_ext(dev);
737         else
738                 /* To support Linux PF host */
739                 return i40evf_configure_vsi_queues(dev);
740 }
741
742 static int
743 i40evf_config_irq_map(struct rte_eth_dev *dev)
744 {
745         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
746         struct vf_cmd_info args;
747         uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
748                 sizeof(struct virtchnl_vector_map)];
749         struct virtchnl_irq_map_info *map_info;
750         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
751         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
752         uint32_t vector_id;
753         int i, err;
754
755         if (rte_intr_allow_others(intr_handle)) {
756                 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
757                         vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
758                 else
759                         vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
760         } else {
761                 vector_id = I40E_MISC_VEC_ID;
762         }
763
764         map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
765         map_info->num_vectors = 1;
766         map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
767         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
768         /* Alway use default dynamic MSIX interrupt */
769         map_info->vecmap[0].vector_id = vector_id;
770         /* Don't map any tx queue */
771         map_info->vecmap[0].txq_map = 0;
772         map_info->vecmap[0].rxq_map = 0;
773         for (i = 0; i < dev->data->nb_rx_queues; i++) {
774                 map_info->vecmap[0].rxq_map |= 1 << i;
775                 if (rte_intr_dp_is_en(intr_handle))
776                         intr_handle->intr_vec[i] = vector_id;
777         }
778
779         args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
780         args.in_args = (u8 *)cmd_buffer;
781         args.in_args_size = sizeof(cmd_buffer);
782         args.out_buffer = vf->aq_resp;
783         args.out_size = I40E_AQ_BUF_SZ;
784         err = i40evf_execute_vf_cmd(dev, &args);
785         if (err)
786                 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
787
788         return err;
789 }
790
791 static int
792 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
793                                 bool on)
794 {
795         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
796         struct virtchnl_queue_select queue_select;
797         int err;
798         struct vf_cmd_info args;
799         memset(&queue_select, 0, sizeof(queue_select));
800         queue_select.vsi_id = vf->vsi_res->vsi_id;
801
802         if (isrx)
803                 queue_select.rx_queues |= 1 << qid;
804         else
805                 queue_select.tx_queues |= 1 << qid;
806
807         if (on)
808                 args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
809         else
810                 args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
811         args.in_args = (u8 *)&queue_select;
812         args.in_args_size = sizeof(queue_select);
813         args.out_buffer = vf->aq_resp;
814         args.out_size = I40E_AQ_BUF_SZ;
815         err = i40evf_execute_vf_cmd(dev, &args);
816         if (err)
817                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
818                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
819
820         return err;
821 }
822
823 static int
824 i40evf_start_queues(struct rte_eth_dev *dev)
825 {
826         struct rte_eth_dev_data *dev_data = dev->data;
827         int i;
828         struct i40e_rx_queue *rxq;
829         struct i40e_tx_queue *txq;
830
831         for (i = 0; i < dev->data->nb_rx_queues; i++) {
832                 rxq = dev_data->rx_queues[i];
833                 if (rxq->rx_deferred_start)
834                         continue;
835                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
836                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
837                         return -1;
838                 }
839         }
840
841         for (i = 0; i < dev->data->nb_tx_queues; i++) {
842                 txq = dev_data->tx_queues[i];
843                 if (txq->tx_deferred_start)
844                         continue;
845                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
846                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
847                         return -1;
848                 }
849         }
850
851         return 0;
852 }
853
854 static int
855 i40evf_stop_queues(struct rte_eth_dev *dev)
856 {
857         int i;
858
859         /* Stop TX queues first */
860         for (i = 0; i < dev->data->nb_tx_queues; i++) {
861                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
862                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
863                         return -1;
864                 }
865         }
866
867         /* Then stop RX queues */
868         for (i = 0; i < dev->data->nb_rx_queues; i++) {
869                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
870                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
871                         return -1;
872                 }
873         }
874
875         return 0;
876 }
877
878 static int
879 i40evf_add_mac_addr(struct rte_eth_dev *dev,
880                     struct ether_addr *addr,
881                     __rte_unused uint32_t index,
882                     __rte_unused uint32_t pool)
883 {
884         struct virtchnl_ether_addr_list *list;
885         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
886         uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
887                         sizeof(struct virtchnl_ether_addr)];
888         int err;
889         struct vf_cmd_info args;
890
891         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
892                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
893                             addr->addr_bytes[0], addr->addr_bytes[1],
894                             addr->addr_bytes[2], addr->addr_bytes[3],
895                             addr->addr_bytes[4], addr->addr_bytes[5]);
896                 return I40E_ERR_INVALID_MAC_ADDR;
897         }
898
899         list = (struct virtchnl_ether_addr_list *)cmd_buffer;
900         list->vsi_id = vf->vsi_res->vsi_id;
901         list->num_elements = 1;
902         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
903                                         sizeof(addr->addr_bytes));
904
905         args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
906         args.in_args = cmd_buffer;
907         args.in_args_size = sizeof(cmd_buffer);
908         args.out_buffer = vf->aq_resp;
909         args.out_size = I40E_AQ_BUF_SZ;
910         err = i40evf_execute_vf_cmd(dev, &args);
911         if (err)
912                 PMD_DRV_LOG(ERR, "fail to execute command "
913                             "OP_ADD_ETHER_ADDRESS");
914         else
915                 vf->vsi.mac_num++;
916
917         return err;
918 }
919
920 static void
921 i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
922                             struct ether_addr *addr)
923 {
924         struct virtchnl_ether_addr_list *list;
925         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
926         uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
927                         sizeof(struct virtchnl_ether_addr)];
928         int err;
929         struct vf_cmd_info args;
930
931         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
932                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
933                             addr->addr_bytes[0], addr->addr_bytes[1],
934                             addr->addr_bytes[2], addr->addr_bytes[3],
935                             addr->addr_bytes[4], addr->addr_bytes[5]);
936                 return;
937         }
938
939         list = (struct virtchnl_ether_addr_list *)cmd_buffer;
940         list->vsi_id = vf->vsi_res->vsi_id;
941         list->num_elements = 1;
942         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
943                         sizeof(addr->addr_bytes));
944
945         args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
946         args.in_args = cmd_buffer;
947         args.in_args_size = sizeof(cmd_buffer);
948         args.out_buffer = vf->aq_resp;
949         args.out_size = I40E_AQ_BUF_SZ;
950         err = i40evf_execute_vf_cmd(dev, &args);
951         if (err)
952                 PMD_DRV_LOG(ERR, "fail to execute command "
953                             "OP_DEL_ETHER_ADDRESS");
954         else
955                 vf->vsi.mac_num--;
956         return;
957 }
958
959 static void
960 i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
961 {
962         struct rte_eth_dev_data *data = dev->data;
963         struct ether_addr *addr;
964
965         addr = &data->mac_addrs[index];
966
967         i40evf_del_mac_addr_by_addr(dev, addr);
968 }
969
970 static int
971 i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
972 {
973         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
974         struct virtchnl_queue_select q_stats;
975         int err;
976         struct vf_cmd_info args;
977
978         memset(&q_stats, 0, sizeof(q_stats));
979         q_stats.vsi_id = vf->vsi_res->vsi_id;
980         args.ops = VIRTCHNL_OP_GET_STATS;
981         args.in_args = (u8 *)&q_stats;
982         args.in_args_size = sizeof(q_stats);
983         args.out_buffer = vf->aq_resp;
984         args.out_size = I40E_AQ_BUF_SZ;
985
986         err = i40evf_execute_vf_cmd(dev, &args);
987         if (err) {
988                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
989                 *pstats = NULL;
990                 return err;
991         }
992         *pstats = (struct i40e_eth_stats *)args.out_buffer;
993         return 0;
994 }
995
996 static int
997 i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
998 {
999         int ret;
1000         struct i40e_eth_stats *pstats = NULL;
1001
1002         ret = i40evf_update_stats(dev, &pstats);
1003         if (ret != 0)
1004                 return 0;
1005
1006         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1007                                                 pstats->rx_broadcast;
1008         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1009                                                 pstats->tx_unicast;
1010         stats->imissed = pstats->rx_discards;
1011         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1012         stats->ibytes = pstats->rx_bytes;
1013         stats->obytes = pstats->tx_bytes;
1014
1015         return 0;
1016 }
1017
/*
 * Reset extended statistics by snapshotting the current counters as the
 * new baseline; later reads are reported relative to this offset.
 * NOTE(review): the i40evf_update_stats() return value is ignored — on AQ
 * failure the previous offsets are kept; confirm this is acceptable.
 */
static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_eth_stats *pstats = NULL;

	/* read stat values to clear hardware registers */
	i40evf_update_stats(dev, &pstats);

	/* set stats offset base on current values */
	vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
}
1030
1031 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1032                                       struct rte_eth_xstat_name *xstats_names,
1033                                       __rte_unused unsigned limit)
1034 {
1035         unsigned i;
1036
1037         if (xstats_names != NULL)
1038                 for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1039                         snprintf(xstats_names[i].name,
1040                                 sizeof(xstats_names[i].name),
1041                                 "%s", rte_i40evf_stats_strings[i].name);
1042                 }
1043         return I40EVF_NB_XSTATS;
1044 }
1045
1046 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
1047                                  struct rte_eth_xstat *xstats, unsigned n)
1048 {
1049         int ret;
1050         unsigned i;
1051         struct i40e_eth_stats *pstats = NULL;
1052
1053         if (n < I40EVF_NB_XSTATS)
1054                 return I40EVF_NB_XSTATS;
1055
1056         ret = i40evf_update_stats(dev, &pstats);
1057         if (ret != 0)
1058                 return 0;
1059
1060         if (!xstats)
1061                 return 0;
1062
1063         /* loop over xstats array and values from pstats */
1064         for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1065                 xstats[i].id = i;
1066                 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1067                         rte_i40evf_stats_strings[i].offset);
1068         }
1069
1070         return I40EVF_NB_XSTATS;
1071 }
1072
1073 static int
1074 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1075 {
1076         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1077         struct virtchnl_vlan_filter_list *vlan_list;
1078         uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1079                                                         sizeof(uint16_t)];
1080         int err;
1081         struct vf_cmd_info args;
1082
1083         vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1084         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1085         vlan_list->num_elements = 1;
1086         vlan_list->vlan_id[0] = vlanid;
1087
1088         args.ops = VIRTCHNL_OP_ADD_VLAN;
1089         args.in_args = (u8 *)&cmd_buffer;
1090         args.in_args_size = sizeof(cmd_buffer);
1091         args.out_buffer = vf->aq_resp;
1092         args.out_size = I40E_AQ_BUF_SZ;
1093         err = i40evf_execute_vf_cmd(dev, &args);
1094         if (err)
1095                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1096
1097         return err;
1098 }
1099
1100 static int
1101 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1102 {
1103         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1104         struct virtchnl_vlan_filter_list *vlan_list;
1105         uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1106                                                         sizeof(uint16_t)];
1107         int err;
1108         struct vf_cmd_info args;
1109
1110         vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1111         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1112         vlan_list->num_elements = 1;
1113         vlan_list->vlan_id[0] = vlanid;
1114
1115         args.ops = VIRTCHNL_OP_DEL_VLAN;
1116         args.in_args = (u8 *)&cmd_buffer;
1117         args.in_args_size = sizeof(cmd_buffer);
1118         args.out_buffer = vf->aq_resp;
1119         args.out_size = I40E_AQ_BUF_SZ;
1120         err = i40evf_execute_vf_cmd(dev, &args);
1121         if (err)
1122                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1123
1124         return err;
1125 }
1126
/* PCI device IDs this VF PMD binds to; the zeroed entry terminates the
 * table. */
static const struct rte_pci_id pci_id_i40evf_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
1134
/*
 * Atomically publish *link into dev->data->dev_link, treating the whole
 * rte_eth_link struct as a single 64-bit word (see the uint64_t casts).
 * Returns 0 on success, -1 when the compare-and-set loses a race with a
 * concurrent writer that changed dev_link between the read and the swap.
 */
static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	/* Expected value is the current *dst, so the cmpset only fails if
	 * another thread updated it concurrently. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1148
/* Disable IRQ0: mask every interrupt cause and flush the writes so the
 * device sees them before the caller proceeds. */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* Write DYN_CTL01 with only the ITR index bits set (INTENA clear). */
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	I40EVF_WRITE_FLUSH(hw);
}
1159
/* Enable IRQ0: unmask the admin-queue and link-status-change causes and
 * re-arm the interrupt via DYN_CTL01. */
static inline void
i40evf_enable_irq0(struct i40e_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	uint32_t val;

	/* NOTE(review): masking first appears intended to avoid a spurious
	 * trigger while the enable bits are updated — confirm. */
	i40evf_disable_irq0(hw);
	val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
	val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
		I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);

	/* Re-enable the interrupt and clear any pending-bit-array state. */
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		I40E_VFINT_DYN_CTL01_INTENA_MASK |
		I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
		I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);

	I40EVF_WRITE_FLUSH(hw);
}
1180
1181 static int
1182 i40evf_reset_vf(struct i40e_hw *hw)
1183 {
1184         int i, reset;
1185
1186         if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1187                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1188                 return -1;
1189         }
1190         /**
1191           * After issuing vf reset command to pf, pf won't necessarily
1192           * reset vf, it depends on what state it exactly is. If it's not
1193           * initialized yet, it won't have vf reset since it's in a certain
1194           * state. If not, it will try to reset. Even vf is reset, pf will
1195           * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
1196           * it to ACTIVE. In this duration, vf may not catch the moment that
1197           * COMPLETE is set. So, for vf, we'll try to wait a long time.
1198           */
1199         rte_delay_ms(200);
1200
1201         for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1202                 reset = rd32(hw, I40E_VFGEN_RSTAT) &
1203                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1204                 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1205                 if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset)
1206                         break;
1207                 else
1208                         rte_delay_ms(50);
1209         }
1210
1211         if (i >= MAX_RESET_WAIT_CNT) {
1212                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1213                 return -1;
1214         }
1215
1216         return 0;
1217 }
1218
1219 static int
1220 i40evf_init_vf(struct rte_eth_dev *dev)
1221 {
1222         int i, err, bufsz;
1223         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1224         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1225         uint16_t interval =
1226                 i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
1227
1228         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1229         vf->dev_data = dev->data;
1230         err = i40e_set_mac_type(hw);
1231         if (err) {
1232                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1233                 goto err;
1234         }
1235
1236         i40e_init_adminq_parameter(hw);
1237         err = i40e_init_adminq(hw);
1238         if (err) {
1239                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1240                 goto err;
1241         }
1242
1243         /* Reset VF and wait until it's complete */
1244         if (i40evf_reset_vf(hw)) {
1245                 PMD_INIT_LOG(ERR, "reset NIC failed");
1246                 goto err_aq;
1247         }
1248
1249         /* VF reset, shutdown admin queue and initialize again */
1250         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1251                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1252                 return -1;
1253         }
1254
1255         i40e_init_adminq_parameter(hw);
1256         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1257                 PMD_INIT_LOG(ERR, "init_adminq failed");
1258                 return -1;
1259         }
1260         vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
1261         if (!vf->aq_resp) {
1262                 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
1263                         goto err_aq;
1264         }
1265         if (i40evf_check_api_version(dev) != 0) {
1266                 PMD_INIT_LOG(ERR, "check_api version failed");
1267                 goto err_aq;
1268         }
1269         bufsz = sizeof(struct virtchnl_vf_resource) +
1270                 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
1271         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1272         if (!vf->vf_res) {
1273                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1274                         goto err_aq;
1275         }
1276
1277         if (i40evf_get_vf_resource(dev) != 0) {
1278                 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1279                 goto err_alloc;
1280         }
1281
1282         /* got VF config message back from PF, now we can parse it */
1283         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1284                 if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
1285                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1286         }
1287
1288         if (!vf->vsi_res) {
1289                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1290                 goto err_alloc;
1291         }
1292
1293         if (hw->mac.type == I40E_MAC_X722_VF)
1294                 vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
1295         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1296         vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type;
1297         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1298         vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1299
1300         /* Store the MAC address configured by host, or generate random one */
1301         if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
1302                 vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
1303         else
1304                 eth_random_addr(hw->mac.addr); /* Generate a random one */
1305
1306         /* If the PF host is not DPDK, set the interval of ITR0 to max*/
1307         if (vf->version_major != I40E_DPDK_VERSION_MAJOR) {
1308                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1309                                (I40E_ITR_INDEX_DEFAULT <<
1310                                 I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1311                                (interval <<
1312                                 I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
1313                 I40EVF_WRITE_FLUSH(hw);
1314         }
1315
1316         return 0;
1317
1318 err_alloc:
1319         rte_free(vf->vf_res);
1320 err_aq:
1321         i40e_shutdown_adminq(hw); /* ignore error */
1322 err:
1323         return -1;
1324 }
1325
1326 static int
1327 i40evf_uninit_vf(struct rte_eth_dev *dev)
1328 {
1329         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1330         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1331
1332         PMD_INIT_FUNC_TRACE();
1333
1334         if (hw->adapter_stopped == 0)
1335                 i40evf_dev_close(dev);
1336         rte_free(vf->vf_res);
1337         vf->vf_res = NULL;
1338         rte_free(vf->aq_resp);
1339         vf->aq_resp = NULL;
1340
1341         return 0;
1342 }
1343
1344 static void
1345 i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
1346                 __rte_unused uint16_t msglen)
1347 {
1348         struct virtchnl_pf_event *pf_msg =
1349                         (struct virtchnl_pf_event *)msg;
1350         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1351
1352         switch (pf_msg->event) {
1353         case VIRTCHNL_EVENT_RESET_IMPENDING:
1354                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
1355                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1356                                               NULL, NULL);
1357                 break;
1358         case VIRTCHNL_EVENT_LINK_CHANGE:
1359                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
1360                 vf->link_up = pf_msg->event_data.link_event.link_status;
1361                 vf->link_speed = pf_msg->event_data.link_event.link_speed;
1362                 break;
1363         case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
1364                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
1365                 break;
1366         default:
1367                 PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
1368                 break;
1369         }
1370 }
1371
/*
 * Drain all pending admin-queue messages from the PF and dispatch them:
 * PF events go to i40evf_handle_pf_event(); a reply matching the pending
 * VF command stores its return code in vf->cmd_retval and clears
 * vf->pend_cmd, releasing the waiter in i40evf_execute_vf_cmd().
 */
static void
i40evf_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info info;
	uint16_t pending, aq_opc;
	enum virtchnl_ops msg_opc;
	enum i40e_status_code msg_ret;
	int ret;

	/* Messages are received into the shared AQ response buffer. */
	info.buf_len = I40E_AQ_BUF_SZ;
	if (!vf->aq_resp) {
		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
		return;
	}
	info.msg_buf = vf->aq_resp;

	/* i40e_clean_arq_element() updates `pending` with the count of
	 * messages still queued; loop until the ARQ is empty or errors. */
	pending = 1;
	while (pending) {
		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
				    "ret: %d", ret);
			break;
		}
		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
		/* For a message sent from PF to VF, the virtchnl opcode is
		 * stored in cookie_high of struct i40e_aq_desc, while the
		 * return code is stored in cookie_low.  This is done by
		 * i40e_aq_send_msg_to_vf in the PF driver. */
		msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
						  info.desc.cookie_high);
		msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
						  info.desc.cookie_low);
		switch (aq_opc) {
		case i40e_aqc_opc_send_msg_to_vf:
			if (msg_opc == VIRTCHNL_OP_EVENT)
				/* process event*/
				i40evf_handle_pf_event(dev, info.msg_buf,
						       info.msg_len);
			else {
				/* read message and it's expected one */
				if (msg_opc == vf->pend_cmd) {
					vf->cmd_retval = msg_ret;
					/* prevent compiler reordering: the
					 * retval must be visible before the
					 * pending command is cleared */
					rte_compiler_barrier();
					_clear_cmd(vf);
				} else
					PMD_DRV_LOG(ERR, "command mismatch,"
						"expect %u, get %u",
						vf->pend_cmd, msg_opc);
				PMD_DRV_LOG(DEBUG, "adminq response is received,"
					     " opcode = %d", msg_opc);
			}
			break;
		default:
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
				    aq_opc);
			break;
		}
	}
}
1436
1437 /**
1438  * Interrupt handler triggered by NIC  for handling
1439  * specific interrupt. Only adminq interrupt is processed in VF.
1440  *
1441  * @param handle
1442  *  Pointer to interrupt handle.
1443  * @param param
1444  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1445  *
1446  * @return
1447  *  void
1448  */
1449 static void
1450 i40evf_dev_interrupt_handler(void *param)
1451 {
1452         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1453         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1454         uint32_t icr0;
1455
1456         i40evf_disable_irq0(hw);
1457
1458         /* read out interrupt causes */
1459         icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
1460
1461         /* No interrupt event indicated */
1462         if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
1463                 PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
1464                 goto done;
1465         }
1466
1467         if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
1468                 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
1469                 i40evf_handle_aq_msg(dev);
1470         }
1471
1472         /* Link Status Change interrupt */
1473         if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
1474                 PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
1475                                    " do nothing");
1476
1477 done:
1478         i40evf_enable_irq0(hw);
1479         rte_intr_enable(dev->intr_handle);
1480 }
1481
/*
 * ethdev init callback for the VF: wires up the ops/burst functions, and in
 * the primary process additionally fills the HW struct from PCI info, runs
 * the full VF initialization, registers the AQ interrupt handler and
 * allocates the MAC address table.
 *
 * Returns 0 on success, -1 or -ENOMEM on failure.
 */
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw
		= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.  Only the RX/TX function pointers are
	 * (re)selected for this process.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);
		return 0;
	}
	i40e_set_default_ptype_table(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Mirror the PCI identity into the shared-code HW struct. */
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;

	if(i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
		i40evf_dev_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	i40evf_enable_irq0(hw);

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
					0);
	/* NOTE(review): on this -ENOMEM path the VF stays initialized and the
	 * interrupt callback stays registered — no unwind; confirm whether a
	 * cleanup is needed for this rare failure. */
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
				" store MAC addresses",
				ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	return 0;
}
1548
1549 static int
1550 i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
1551 {
1552         PMD_INIT_FUNC_TRACE();
1553
1554         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1555                 return -EPERM;
1556
1557         eth_dev->dev_ops = NULL;
1558         eth_dev->rx_pkt_burst = NULL;
1559         eth_dev->tx_pkt_burst = NULL;
1560
1561         if (i40evf_uninit_vf(eth_dev) != 0) {
1562                 PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
1563                 return -1;
1564         }
1565
1566         rte_free(eth_dev->data->mac_addrs);
1567         eth_dev->data->mac_addrs = NULL;
1568
1569         return 0;
1570 }
1571
1572 static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1573         struct rte_pci_device *pci_dev)
1574 {
1575         return rte_eth_dev_pci_generic_probe(pci_dev,
1576                 sizeof(struct i40e_adapter), i40evf_dev_init);
1577 }
1578
1579 static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
1580 {
1581         return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
1582 }
1583
/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_i40evf_pmd = {
	.id_table = pci_id_i40evf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING, /* map BARs before probe */
	.probe = eth_i40evf_pci_probe,
	.remove = eth_i40evf_pci_remove,
};

/* Register the VF PMD with the EAL, publish its PCI id table and the
 * kernel modules a matching device may be bound to.
 */
RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
1597
1598 static int
1599 i40evf_dev_configure(struct rte_eth_dev *dev)
1600 {
1601         struct i40e_adapter *ad =
1602                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1603         struct rte_eth_conf *conf = &dev->data->dev_conf;
1604         struct i40e_vf *vf;
1605
1606         /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1607          * allocation or vector Rx preconditions we will reset it.
1608          */
1609         ad->rx_bulk_alloc_allowed = true;
1610         ad->rx_vec_allowed = true;
1611         ad->tx_simple_allowed = true;
1612         ad->tx_vec_allowed = true;
1613
1614         /* For non-DPDK PF drivers, VF has no ability to disable HW
1615          * CRC strip, and is implicitly enabled by the PF.
1616          */
1617         if (!conf->rxmode.hw_strip_crc) {
1618                 vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1619                 if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
1620                     (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
1621                         /* Peer is running non-DPDK PF driver. */
1622                         PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
1623                         return -EINVAL;
1624                 }
1625         }
1626
1627         return i40evf_init_vlan(dev);
1628 }
1629
1630 static int
1631 i40evf_init_vlan(struct rte_eth_dev *dev)
1632 {
1633         struct rte_eth_dev_data *data = dev->data;
1634         int ret;
1635
1636         /* Apply vlan offload setting */
1637         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1638
1639         /* Apply pvid setting */
1640         ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1641                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
1642         return ret;
1643 }
1644
1645 static void
1646 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1647 {
1648         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1649
1650         /* Vlan stripping setting */
1651         if (mask & ETH_VLAN_STRIP_MASK) {
1652                 /* Enable or disable VLAN stripping */
1653                 if (dev_conf->rxmode.hw_vlan_strip)
1654                         i40evf_enable_vlan_strip(dev);
1655                 else
1656                         i40evf_disable_vlan_strip(dev);
1657         }
1658 }
1659
1660 static int
1661 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1662 {
1663         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1664         struct i40e_vsi_vlan_pvid_info info;
1665         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1666
1667         memset(&info, 0, sizeof(info));
1668         info.on = on;
1669
1670         /* Linux pf host don't support vlan offload yet */
1671         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1672                 if (info.on)
1673                         info.config.pvid = pvid;
1674                 else {
1675                         info.config.reject.tagged =
1676                                 dev_conf->txmode.hw_vlan_reject_tagged;
1677                         info.config.reject.untagged =
1678                                 dev_conf->txmode.hw_vlan_reject_untagged;
1679                 }
1680                 return i40evf_config_vlan_pvid(dev, &info);
1681         }
1682
1683         return 0;
1684 }
1685
/* Start an Rx queue: post mbufs on the ring, program the tail doorbell
 * and ask the host (via virtchnl) to switch the queue on. Out-of-range
 * queue ids fall through and return 0 (the ethdev layer validates ids).
 */
static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct i40e_rx_queue *rxq;
	int err = 0;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		err = i40e_alloc_rx_queue_mbufs(rxq);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
			return err;
		}

		/* Descriptor writes must be visible before the tail write. */
		rte_wmb();

		/* Init the RX tail register. */
		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
		I40EVF_WRITE_FLUSH(hw);

		/* Ready to switch the queue on */
		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);

		if (err)
			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
				    rx_queue_id);
		else
			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return err;
}
1722
1723 static int
1724 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1725 {
1726         struct i40e_rx_queue *rxq;
1727         int err;
1728
1729         if (rx_queue_id < dev->data->nb_rx_queues) {
1730                 rxq = dev->data->rx_queues[rx_queue_id];
1731
1732                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1733
1734                 if (err) {
1735                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1736                                     rx_queue_id);
1737                         return err;
1738                 }
1739
1740                 i40e_rx_queue_release_mbufs(rxq);
1741                 i40e_reset_rx_queue(rxq);
1742                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1743         }
1744
1745         return 0;
1746 }
1747
1748 static int
1749 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1750 {
1751         int err = 0;
1752
1753         PMD_INIT_FUNC_TRACE();
1754
1755         if (tx_queue_id < dev->data->nb_tx_queues) {
1756
1757                 /* Ready to switch the queue on */
1758                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1759
1760                 if (err)
1761                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1762                                     tx_queue_id);
1763                 else
1764                         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1765         }
1766
1767         return err;
1768 }
1769
1770 static int
1771 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1772 {
1773         struct i40e_tx_queue *txq;
1774         int err;
1775
1776         if (tx_queue_id < dev->data->nb_tx_queues) {
1777                 txq = dev->data->tx_queues[tx_queue_id];
1778
1779                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1780
1781                 if (err) {
1782                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1783                                     tx_queue_id);
1784                         return err;
1785                 }
1786
1787                 i40e_tx_queue_release_mbufs(txq);
1788                 i40e_reset_tx_queue(txq);
1789                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1790         }
1791
1792         return 0;
1793 }
1794
1795 static int
1796 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1797 {
1798         int ret;
1799
1800         if (on)
1801                 ret = i40evf_add_vlan(dev, vlan_id);
1802         else
1803                 ret = i40evf_del_vlan(dev,vlan_id);
1804
1805         return ret;
1806 }
1807
/* Per-queue Rx initialisation: program the tail doorbell, derive buffer
 * and max packet length from the mempool, and validate the result
 * against the jumbo-frame setting. Returns 0 or I40E_ERR_CONFIG.
 */
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					RTE_PKTMBUF_HEADROOM);
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	/* Round buffer length to the granularity the HW context expects. */
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	/* Largest packet representable by a chain of Rx buffers. */
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
		dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
					(uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
						(uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	/* Scattered Rx is required if one packet can exceed a single
	 * mbuf's data room (VLAN tags included).
	 */
	if (dev_data->dev_conf.rxmode.enable_scatter ||
	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}

	return 0;
}
1861
1862 static int
1863 i40evf_rx_init(struct rte_eth_dev *dev)
1864 {
1865         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1866         uint16_t i;
1867         int ret = I40E_SUCCESS;
1868         struct i40e_rx_queue **rxq =
1869                 (struct i40e_rx_queue **)dev->data->rx_queues;
1870
1871         i40evf_config_rss(vf);
1872         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1873                 if (!rxq[i] || !rxq[i]->q_set)
1874                         continue;
1875                 ret = i40evf_rxq_init(dev, rxq[i]);
1876                 if (ret != I40E_SUCCESS)
1877                         break;
1878         }
1879         if (ret == I40E_SUCCESS)
1880                 i40e_set_rx_function(dev);
1881
1882         return ret;
1883 }
1884
1885 static void
1886 i40evf_tx_init(struct rte_eth_dev *dev)
1887 {
1888         uint16_t i;
1889         struct i40e_tx_queue **txq =
1890                 (struct i40e_tx_queue **)dev->data->tx_queues;
1891         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892
1893         for (i = 0; i < dev->data->nb_tx_queues; i++)
1894                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1895
1896         i40e_set_tx_function(dev);
1897 }
1898
/* Enable the Rx queue interrupt(s). Without per-queue vectors the
 * shared vector 0 is armed through CTL01; with a DPDK PF host the
 * default queue MSI-X vector is armed through the CTLN1 array.
 */
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
		/* Single shared vector: arm it via CTL01. */
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		/* To support DPDK PF host */
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	/* If host driver is kernel driver, do nothing.
	 * Interrupt 0 is used for rx packets, but don't set
	 * I40E_VFINT_DYN_CTL01,
	 * because it is already done in i40evf_enable_irq0.
	 */

	I40EVF_WRITE_FLUSH(hw);
}
1931
/* Disable the Rx queue interrupt(s): the mirror of
 * i40evf_enable_queues_intr(). Writes the corresponding dynamic
 * control register(s) with the enable bits cleared.
 */
static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
		/* Single shared vector: clear CTL01 (ITR index kept). */
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
						    - 1),
			       0);
	/* If host driver is kernel driver, do nothing.
	 * Interrupt 0 is used for rx packets, but don't zero
	 * I40E_VFINT_DYN_CTL01,
	 * because interrupt 0 is also used for adminq processing.
	 */

	I40EVF_WRITE_FLUSH(hw);
}
1960
1961 static int
1962 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1963 {
1964         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1965         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1966         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1967         uint16_t interval =
1968                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1969         uint16_t msix_intr;
1970
1971         msix_intr = intr_handle->intr_vec[queue_id];
1972         if (msix_intr == I40E_MISC_VEC_ID)
1973                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1974                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
1975                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1976                                (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
1977                                (interval <<
1978                                 I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
1979         else
1980                 I40E_WRITE_REG(hw,
1981                                I40E_VFINT_DYN_CTLN1(msix_intr -
1982                                                     I40E_RX_VEC_START),
1983                                I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1984                                I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1985                                (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1986                                (interval <<
1987                                 I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
1988
1989         I40EVF_WRITE_FLUSH(hw);
1990
1991         rte_intr_enable(&pci_dev->intr_handle);
1992
1993         return 0;
1994 }
1995
1996 static int
1997 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1998 {
1999         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2000         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2001         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2002         uint16_t msix_intr;
2003
2004         msix_intr = intr_handle->intr_vec[queue_id];
2005         if (msix_intr == I40E_MISC_VEC_ID)
2006                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
2007         else
2008                 I40E_WRITE_REG(hw,
2009                                I40E_VFINT_DYN_CTLN1(msix_intr -
2010                                                     I40E_RX_VEC_START),
2011                                0);
2012
2013         I40EVF_WRITE_FLUSH(hw);
2014
2015         return 0;
2016 }
2017
/* Add (add == TRUE) or delete all non-zero MAC addresses of the port
 * on the host. Addresses are sent in chunks: each virtchnl message is
 * filled until it would exceed the adminq buffer size, then the next
 * chunk starts at the first address not yet sent.
 */
static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err, i, j;
	int next_begin = 0;
	int begin = 0;
	uint32_t len;
	struct ether_addr *addr;
	struct vf_cmd_info args;

	do {
		j = 0;
		len = sizeof(struct virtchnl_ether_addr_list);
		/* First pass: size the message; stop once the buffer
		 * limit is hit and remember where the next chunk starts.
		 */
		for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
			if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
				continue;
			len += sizeof(struct virtchnl_ether_addr);
			if (len >= I40E_AQ_BUF_SZ) {
				next_begin = i + 1;
				break;
			}
		}

		list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
		if (!list) {
			PMD_DRV_LOG(ERR, "fail to allocate memory");
			return;
		}

		/* Second pass: copy the same range of addresses into the
		 * message; j counts the elements actually placed.
		 */
		for (i = begin; i < next_begin; i++) {
			addr = &dev->data->mac_addrs[i];
			if (is_zero_ether_addr(addr))
				continue;
			(void)rte_memcpy(list->list[j].addr, addr->addr_bytes,
					 sizeof(addr->addr_bytes));
			PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
				    addr->addr_bytes[0], addr->addr_bytes[1],
				    addr->addr_bytes[2], addr->addr_bytes[3],
				    addr->addr_bytes[4], addr->addr_bytes[5]);
			j++;
		}
		list->vsi_id = vf->vsi_res->vsi_id;
		list->num_elements = j;
		args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			   VIRTCHNL_OP_DEL_ETH_ADDR;
		args.in_args = (uint8_t *)list;
		args.in_args_size = len;
		args.out_buffer = vf->aq_resp;
		args.out_size = I40E_AQ_BUF_SZ;
		err = i40evf_execute_vf_cmd(dev, &args);
		if (err) {
			PMD_DRV_LOG(ERR, "fail to execute command %s",
				    add ? "OP_ADD_ETHER_ADDRESS" :
				    "OP_DEL_ETHER_ADDRESS");
		} else {
			/* Keep the soft MAC counter in sync on success. */
			if (add)
				vf->vsi.mac_num++;
			else
				vf->vsi.mac_num--;
		}
		rte_free(list);
		begin = next_begin;
	} while (begin < I40E_NUM_MACADDR_MAX);
}
2084
/* dev_start callback: set up the Rx interrupt vectors, initialise the
 * Rx/Tx queues, push the queue/irq configuration and MAC filters to
 * the host, then switch the queues on. Returns 0 on success, negative
 * on failure.
 */
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	/* A queue pair serves both directions, so take the larger count. */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					dev->data->nb_tx_queues);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	if (i40evf_rx_init(dev) != 0){
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	i40evf_add_del_all_mac_addr(dev, TRUE);

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	i40evf_enable_queues_intr(dev);
	return 0;

err_mac:
	/* Roll back the MAC filters pushed above. */
	i40evf_add_del_all_mac_addr(dev, FALSE);
err_queue:
	return -1;
}
2152
2153 static void
2154 i40evf_dev_stop(struct rte_eth_dev *dev)
2155 {
2156         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2157         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2158         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev);
2159
2160         PMD_INIT_FUNC_TRACE();
2161
2162         if (hw->adapter_stopped == 1)
2163                 return;
2164         i40evf_stop_queues(dev);
2165         i40evf_disable_queues_intr(dev);
2166         i40e_dev_clear_queues(dev);
2167
2168         /* Clean datapath event and queue/vec mapping */
2169         rte_intr_efd_disable(intr_handle);
2170         if (intr_handle->intr_vec) {
2171                 rte_free(intr_handle->intr_vec);
2172                 intr_handle->intr_vec = NULL;
2173         }
2174         /* remove all mac addrs */
2175         i40evf_add_del_all_mac_addr(dev, FALSE);
2176         hw->adapter_stopped = 1;
2177
2178 }
2179
2180 static int
2181 i40evf_dev_link_update(struct rte_eth_dev *dev,
2182                        __rte_unused int wait_to_complete)
2183 {
2184         struct rte_eth_link new_link;
2185         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2186         /*
2187          * DPDK pf host provide interfacet to acquire link status
2188          * while Linux driver does not
2189          */
2190
2191         /* Linux driver PF host */
2192         switch (vf->link_speed) {
2193         case I40E_LINK_SPEED_100MB:
2194                 new_link.link_speed = ETH_SPEED_NUM_100M;
2195                 break;
2196         case I40E_LINK_SPEED_1GB:
2197                 new_link.link_speed = ETH_SPEED_NUM_1G;
2198                 break;
2199         case I40E_LINK_SPEED_10GB:
2200                 new_link.link_speed = ETH_SPEED_NUM_10G;
2201                 break;
2202         case I40E_LINK_SPEED_20GB:
2203                 new_link.link_speed = ETH_SPEED_NUM_20G;
2204                 break;
2205         case I40E_LINK_SPEED_25GB:
2206                 new_link.link_speed = ETH_SPEED_NUM_25G;
2207                 break;
2208         case I40E_LINK_SPEED_40GB:
2209                 new_link.link_speed = ETH_SPEED_NUM_40G;
2210                 break;
2211         default:
2212                 new_link.link_speed = ETH_SPEED_NUM_100M;
2213                 break;
2214         }
2215         /* full duplex only */
2216         new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
2217         new_link.link_status = vf->link_up ? ETH_LINK_UP :
2218                                              ETH_LINK_DOWN;
2219
2220         i40evf_dev_atomic_write_link_status(dev, &new_link);
2221
2222         return 0;
2223 }
2224
2225 static void
2226 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
2227 {
2228         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2229         int ret;
2230
2231         /* If enabled, just return */
2232         if (vf->promisc_unicast_enabled)
2233                 return;
2234
2235         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
2236         if (ret == 0)
2237                 vf->promisc_unicast_enabled = TRUE;
2238 }
2239
2240 static void
2241 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
2242 {
2243         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2244         int ret;
2245
2246         /* If disabled, just return */
2247         if (!vf->promisc_unicast_enabled)
2248                 return;
2249
2250         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
2251         if (ret == 0)
2252                 vf->promisc_unicast_enabled = FALSE;
2253 }
2254
2255 static void
2256 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
2257 {
2258         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2259         int ret;
2260
2261         /* If enabled, just return */
2262         if (vf->promisc_multicast_enabled)
2263                 return;
2264
2265         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
2266         if (ret == 0)
2267                 vf->promisc_multicast_enabled = TRUE;
2268 }
2269
2270 static void
2271 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
2272 {
2273         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2274         int ret;
2275
2276         /* If enabled, just return */
2277         if (!vf->promisc_multicast_enabled)
2278                 return;
2279
2280         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
2281         if (ret == 0)
2282                 vf->promisc_multicast_enabled = FALSE;
2283 }
2284
/* dev_infos_get callback: report the VF's capabilities, limits and
 * default queue configuration. Queue counts come from the VSI resource
 * granted by the PF; offload flags are the fixed VF capability set.
 */
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	/* Defaults applied when the application passes a NULL rxconf. */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	/* Defaults applied when the application passes a NULL txconf. */
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	/* Descriptor ring size limits (shared between Rx and Tx). */
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}
2348
2349 static void
2350 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2351 {
2352         if (i40evf_get_statistics(dev, stats))
2353                 PMD_DRV_LOG(ERR, "Get statistics failed");
2354 }
2355
/**
 * Close ethdev callback: fully tear down the VF.
 *
 * Ordering is deliberate: traffic is stopped and queues freed before
 * the VF is reset and the admin queue shut down; the interrupt is
 * disabled before its callback is unregistered so the handler cannot
 * fire mid-teardown (see inline comments below).
 */
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	i40evf_dev_stop(dev);
	i40e_dev_free_queues(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     i40evf_dev_interrupt_handler, dev);
	i40evf_disable_irq0(hw);
}
2375
2376 static int
2377 i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2378 {
2379         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2380         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2381         int ret;
2382
2383         if (!lut)
2384                 return -EINVAL;
2385
2386         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2387                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
2388                                           lut, lut_size);
2389                 if (ret) {
2390                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2391                         return ret;
2392                 }
2393         } else {
2394                 uint32_t *lut_dw = (uint32_t *)lut;
2395                 uint16_t i, lut_size_dw = lut_size / 4;
2396
2397                 for (i = 0; i < lut_size_dw; i++)
2398                         lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
2399         }
2400
2401         return 0;
2402 }
2403
2404 static int
2405 i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2406 {
2407         struct i40e_vf *vf;
2408         struct i40e_hw *hw;
2409         int ret;
2410
2411         if (!vsi || !lut)
2412                 return -EINVAL;
2413
2414         vf = I40E_VSI_TO_VF(vsi);
2415         hw = I40E_VSI_TO_HW(vsi);
2416
2417         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2418                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
2419                                           lut, lut_size);
2420                 if (ret) {
2421                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2422                         return ret;
2423                 }
2424         } else {
2425                 uint32_t *lut_dw = (uint32_t *)lut;
2426                 uint16_t i, lut_size_dw = lut_size / 4;
2427
2428                 for (i = 0; i < lut_size_dw; i++)
2429                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
2430                 I40EVF_WRITE_FLUSH(hw);
2431         }
2432
2433         return 0;
2434 }
2435
2436 static int
2437 i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
2438                            struct rte_eth_rss_reta_entry64 *reta_conf,
2439                            uint16_t reta_size)
2440 {
2441         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2442         uint8_t *lut;
2443         uint16_t i, idx, shift;
2444         int ret;
2445
2446         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2447                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2448                         "(%d) doesn't match the number of hardware can "
2449                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2450                 return -EINVAL;
2451         }
2452
2453         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2454         if (!lut) {
2455                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2456                 return -ENOMEM;
2457         }
2458         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2459         if (ret)
2460                 goto out;
2461         for (i = 0; i < reta_size; i++) {
2462                 idx = i / RTE_RETA_GROUP_SIZE;
2463                 shift = i % RTE_RETA_GROUP_SIZE;
2464                 if (reta_conf[idx].mask & (1ULL << shift))
2465                         lut[i] = reta_conf[idx].reta[shift];
2466         }
2467         ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
2468
2469 out:
2470         rte_free(lut);
2471
2472         return ret;
2473 }
2474
2475 static int
2476 i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
2477                           struct rte_eth_rss_reta_entry64 *reta_conf,
2478                           uint16_t reta_size)
2479 {
2480         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2481         uint16_t i, idx, shift;
2482         uint8_t *lut;
2483         int ret;
2484
2485         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2486                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2487                         "(%d) doesn't match the number of hardware can "
2488                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2489                 return -EINVAL;
2490         }
2491
2492         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2493         if (!lut) {
2494                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2495                 return -ENOMEM;
2496         }
2497
2498         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2499         if (ret)
2500                 goto out;
2501         for (i = 0; i < reta_size; i++) {
2502                 idx = i / RTE_RETA_GROUP_SIZE;
2503                 shift = i % RTE_RETA_GROUP_SIZE;
2504                 if (reta_conf[idx].mask & (1ULL << shift))
2505                         reta_conf[idx].reta[shift] = lut[i];
2506         }
2507
2508 out:
2509         rte_free(lut);
2510
2511         return ret;
2512 }
2513
2514 static int
2515 i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
2516 {
2517         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2518         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2519         int ret = 0;
2520
2521         if (!key || key_len == 0) {
2522                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2523                 return 0;
2524         } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2525                 sizeof(uint32_t)) {
2526                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2527                 return -EINVAL;
2528         }
2529
2530         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2531                 struct i40e_aqc_get_set_rss_key_data *key_dw =
2532                         (struct i40e_aqc_get_set_rss_key_data *)key;
2533
2534                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
2535                 if (ret)
2536                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
2537                                      "via AQ");
2538         } else {
2539                 uint32_t *hash_key = (uint32_t *)key;
2540                 uint16_t i;
2541
2542                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2543                         i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
2544                 I40EVF_WRITE_FLUSH(hw);
2545         }
2546
2547         return ret;
2548 }
2549
2550 static int
2551 i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
2552 {
2553         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2554         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2555         int ret;
2556
2557         if (!key || !key_len)
2558                 return -EINVAL;
2559
2560         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2561                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
2562                         (struct i40e_aqc_get_set_rss_key_data *)key);
2563                 if (ret) {
2564                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
2565                         return ret;
2566                 }
2567         } else {
2568                 uint32_t *key_dw = (uint32_t *)key;
2569                 uint16_t i;
2570
2571                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2572                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
2573         }
2574         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2575
2576         return 0;
2577 }
2578
2579 static int
2580 i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
2581 {
2582         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2583         uint64_t rss_hf, hena;
2584         int ret;
2585
2586         ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
2587                                  rss_conf->rss_key_len);
2588         if (ret)
2589                 return ret;
2590
2591         rss_hf = rss_conf->rss_hf;
2592         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2593         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2594         if (hw->mac.type == I40E_MAC_X722)
2595                 hena &= ~I40E_RSS_HENA_ALL_X722;
2596         else
2597                 hena &= ~I40E_RSS_HENA_ALL;
2598         hena |= i40e_config_hena(rss_hf, hw->mac.type);
2599         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2600         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2601         I40EVF_WRITE_FLUSH(hw);
2602
2603         return 0;
2604 }
2605
2606 static void
2607 i40evf_disable_rss(struct i40e_vf *vf)
2608 {
2609         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2610         uint64_t hena;
2611
2612         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2613         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2614         if (hw->mac.type == I40E_MAC_X722)
2615                 hena &= ~I40E_RSS_HENA_ALL_X722;
2616         else
2617                 hena &= ~I40E_RSS_HENA_ALL;
2618         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2619         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2620         I40EVF_WRITE_FLUSH(hw);
2621 }
2622
/**
 * Configure RSS for the VF from its current device configuration.
 *
 * If the port is not in RSS multi-queue mode, or no supported hash
 * flag is set, RSS is disabled instead. Otherwise the redirection
 * table is filled with the active RX queues in round-robin order and
 * the hash key/enable mask are programmed.
 *
 * @return 0 when RSS is disabled or not requested, otherwise the
 *         result of i40evf_hw_rss_hash_set().
 */
static int
i40evf_config_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	struct rte_eth_rss_conf rss_conf;
	/* nb_q: total LUT entries — four 8-bit entries per HLUT register */
	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
	uint16_t num;

	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "RSS not configured");
		return 0;
	}

	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
	/* Fill out the look up table */
	/* 'lut' accumulates four queue indices (j cycles 0..num-1); one
	 * 32-bit register is written after every fourth entry.
	 */
	for (i = 0, j = 0; i < nb_q; i++, j++) {
		if (j >= num)
			j = 0;
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
	}

	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "No hash flag is set");
		return 0;
	}

	/* No user key (or too short): generate a random default key.
	 * NOTE(review): rss_key_default appears to be a file-scope
	 * buffer defined elsewhere in this file — confirm.
	 */
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
			sizeof(uint32_t);
	}

	return i40evf_hw_rss_hash_set(vf, &rss_conf);
}
2666
2667 static int
2668 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
2669                            struct rte_eth_rss_conf *rss_conf)
2670 {
2671         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2672         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2673         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
2674         uint64_t hena;
2675
2676         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2677         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2678         if (!(hena & ((hw->mac.type == I40E_MAC_X722)
2679                  ? I40E_RSS_HENA_ALL_X722
2680                  : I40E_RSS_HENA_ALL))) { /* RSS disabled */
2681                 if (rss_hf != 0) /* Enable RSS */
2682                         return -EINVAL;
2683                 return 0;
2684         }
2685
2686         /* RSS enabled */
2687         if (rss_hf == 0) /* Disable RSS */
2688                 return -EINVAL;
2689
2690         return i40evf_hw_rss_hash_set(vf, rss_conf);
2691 }
2692
2693 static int
2694 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2695                              struct rte_eth_rss_conf *rss_conf)
2696 {
2697         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2698         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2699         uint64_t hena;
2700
2701         i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
2702                            &rss_conf->rss_key_len);
2703
2704         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2705         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2706         rss_conf->rss_hf = i40e_parse_hena(hena);
2707
2708         return 0;
2709 }
2710
2711 static int
2712 i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2713 {
2714         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2715         struct rte_eth_dev_data *dev_data = vf->dev_data;
2716         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
2717         int ret = 0;
2718
2719         /* check if mtu is within the allowed range */
2720         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
2721                 return -EINVAL;
2722
2723         /* mtu setting is forbidden if port is start */
2724         if (dev_data->dev_started) {
2725                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
2726                             dev_data->port_id);
2727                 return -EBUSY;
2728         }
2729
2730         if (frame_size > ETHER_MAX_LEN)
2731                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
2732         else
2733                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
2734
2735         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2736
2737         return ret;
2738 }
2739
2740 static void
2741 i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
2742                             struct ether_addr *mac_addr)
2743 {
2744         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2745
2746         if (!is_valid_assigned_ether_addr(mac_addr)) {
2747                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2748                 return;
2749         }
2750
2751         if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
2752                 return;
2753
2754         if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
2755                 return;
2756
2757         i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
2758
2759         i40evf_add_mac_addr(dev, mac_addr, 0, 0);
2760 }