 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_rxtx.h"
#include "i40e_ethdev.h"
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT     20
struct i40evf_arq_msg_info {
	enum virtchnl_ops ops;
	enum i40e_status_code result;

	enum virtchnl_ops ops;
	uint32_t in_args_size;
	/* Input & output: pass in the buffer size, pass out the
	 * actual returned result
	 */

enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1,	/* Error when accessing the admin queue */
	I40EVF_MSG_NON,		/* Read nothing from the admin queue */
	I40EVF_MSG_SYS,		/* Read a system msg from the admin queue */
	I40EVF_MSG_CMD,		/* Read an async command result */
static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
	int wait_to_complete);
static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
	struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
	struct rte_eth_xstat *xstats, unsigned n);
static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
	uint16_t vlan_id, int on);
static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40evf_dev_close(struct rte_eth_dev *dev);
static int i40evf_dev_reset(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
	uint16_t rx_queue_id);
static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
	uint16_t rx_queue_id);
static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
	uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
	uint16_t tx_queue_id);
static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
	struct ether_addr *addr,
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int i40evf_config_rss(struct i40e_vf *vf);
static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
	struct ether_addr *mac_addr);
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];

struct rte_i40evf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];

static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
	{"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},

#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
		sizeof(rte_i40evf_stats_strings[0]))
static const struct eth_dev_ops i40evf_eth_dev_ops = {
	.dev_configure = i40evf_dev_configure,
	.dev_start = i40evf_dev_start,
	.dev_stop = i40evf_dev_stop,
	.promiscuous_enable = i40evf_dev_promiscuous_enable,
	.promiscuous_disable = i40evf_dev_promiscuous_disable,
	.allmulticast_enable = i40evf_dev_allmulticast_enable,
	.allmulticast_disable = i40evf_dev_allmulticast_disable,
	.link_update = i40evf_dev_link_update,
	.stats_get = i40evf_dev_stats_get,
	.stats_reset = i40evf_dev_xstats_reset,
	.xstats_get = i40evf_dev_xstats_get,
	.xstats_get_names = i40evf_dev_xstats_get_names,
	.xstats_reset = i40evf_dev_xstats_reset,
	.dev_close = i40evf_dev_close,
	.dev_reset = i40evf_dev_reset,
	.dev_infos_get = i40evf_dev_info_get,
	.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
	.vlan_filter_set = i40evf_vlan_filter_set,
	.vlan_offload_set = i40evf_vlan_offload_set,
	.rx_queue_start = i40evf_dev_rx_queue_start,
	.rx_queue_stop = i40evf_dev_rx_queue_stop,
	.tx_queue_start = i40evf_dev_tx_queue_start,
	.tx_queue_stop = i40evf_dev_tx_queue_stop,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status = i40e_dev_tx_descriptor_status,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rxq_info_get = i40e_rxq_info_get,
	.txq_info_get = i40e_txq_info_get,
	.mac_addr_add = i40evf_add_mac_addr,
	.mac_addr_remove = i40evf_del_mac_addr,
	.reta_update = i40evf_dev_rss_reta_update,
	.reta_query = i40evf_dev_rss_reta_query,
	.rss_hash_update = i40evf_dev_rss_hash_update,
	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
	.mtu_set = i40evf_dev_mtu_set,
	.mac_addr_set = i40evf_set_default_mac_addr,
 * Read data from the admin queue to get a message from the PF driver
static enum i40evf_aq_result
i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info event;
	enum virtchnl_ops opcode;
	enum i40e_status_code retval;
	enum i40evf_aq_result result = I40EVF_MSG_NON;

	event.buf_len = data->buf_len;
	event.msg_buf = data->msg;
	ret = i40e_clean_arq_element(hw, &event, NULL);
	/* Can't read any msg from adminQ */
	if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
		result = I40EVF_MSG_ERR;

	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
	retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);

	if (opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)event.msg_buf;

		result = I40EVF_MSG_SYS;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			vf->link_up =
				vpe->event_data.link_event.link_status;
			vf->link_speed =
				vpe->event_data.link_event.link_speed;
			vf->pend_msg |= PFMSG_LINK_CHANGE;
			PMD_DRV_LOG(INFO, "Link status update:%s",
				vf->link_up ? "up" : "down");
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			vf->pend_msg |= PFMSG_RESET_IMPENDING;
			PMD_DRV_LOG(INFO, "VF is resetting");
		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
			vf->dev_closed = true;
			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
			PMD_DRV_LOG(INFO, "PF driver closed");
			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
				__func__, vpe->event);
		/* async reply msg on a command previously issued by the VF */
		result = I40EVF_MSG_CMD;
		/* Actual data length read from PF */
		data->msg_len = event.msg_len;

	data->result = retval;
 * Clear the current command. Only call this after _atomic_set_cmd has
 * executed successfully.
_clear_cmd(struct i40e_vf *vf)
	vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;

 * Check whether a command is pending. If none, set the new command.
_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
	int ret = rte_atomic32_cmpset(&vf->pend_cmd,
		VIRTCHNL_OP_UNKNOWN, ops);

		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
#define MAX_TRY_TIMES 200
#define ASQ_DELAY_MS  10
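
/* Polling budget for a PF reply: up to MAX_TRY_TIMES iterations of
 * ASQ_DELAY_MS each, i.e. 200 * 10 ms = 2 s in total.
 */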
i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40evf_arq_msg_info info;
	enum i40evf_aq_result ret;

	if (_atomic_set_cmd(vf, args->ops))

	info.msg = args->out_buffer;
	info.buf_len = args->out_size;
	info.ops = VIRTCHNL_OP_UNKNOWN;
	info.result = I40E_SUCCESS;

	err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
		args->in_args, args->in_args_size, NULL);
		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);

	case VIRTCHNL_OP_RESET_VF:
		/* no need to process in this function */
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		/* for init adminq commands, need to poll the response */
			ret = i40evf_read_pfmsg(dev, &info);
			vf->cmd_retval = info.result;
			if (ret == I40EVF_MSG_CMD) {
			} else if (ret == I40EVF_MSG_ERR)
			rte_delay_ms(ASQ_DELAY_MS);
			/* If no msg is read, or a system event is read, continue */
		} while (i++ < MAX_TRY_TIMES);

	/* for other adminq commands at runtime, wait for the cmd-done flag */
		if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
		rte_delay_ms(ASQ_DELAY_MS);
		/* If no msg is read, or a system event is read, continue */
	} while (i++ < MAX_TRY_TIMES);
	/* If no response is received, clear the command */
	if (i >= MAX_TRY_TIMES) {
		PMD_DRV_LOG(WARNING, "No response for %d", args->ops);

	return err | vf->cmd_retval;
 * Check the API version, waiting synchronously until the version is read
 * from the admin queue or the read fails
i40evf_check_api_version(struct rte_eth_dev *dev)
	struct virtchnl_version_info version, *pver;
	struct vf_cmd_info args;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;

	args.ops = VIRTCHNL_OP_VERSION;
	args.in_args = (uint8_t *)&version;
	args.in_args_size = sizeof(version);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");

	pver = (struct virtchnl_version_info *)args.out_buffer;
	vf->version_major = pver->major;
	vf->version_minor = pver->minor;
	if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
	    (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
		PMD_DRV_LOG(INFO, "Peer is Linux PF host");
		PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			vf->version_major, vf->version_minor,
			VIRTCHNL_VERSION_MAJOR,
			VIRTCHNL_VERSION_MINOR);
i40evf_get_vf_resource(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;

	args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	args.in_args = (uint8_t *)&caps;
	args.in_args_size = sizeof(caps);
	args.in_args_size = 0;

	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");

	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);

	rte_memcpy(vf->vf_res, args.out_buffer,
		RTE_MIN(args.out_size, len));
	i40e_vf_parse_hw_config(hw, vf->vf_res);
i40evf_config_promisc(struct rte_eth_dev *dev,
	bool enable_multicast)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;
	struct virtchnl_promisc_info promisc;

	promisc.vsi_id = vf->vsi_res->vsi_id;
		promisc.flags |= FLAG_VF_UNICAST_PROMISC;

	if (enable_multicast)
		promisc.flags |= FLAG_VF_MULTICAST_PROMISC;

	args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	args.in_args = (uint8_t *)&promisc;
	args.in_args_size = sizeof(promisc);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command "
			"CONFIG_PROMISCUOUS_MODE");
i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	args.in_args_size = 0;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");

i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	args.in_args_size = 0;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
	struct i40e_tx_queue *txq)
	txq_info->vsi_id = vsi_id;
	txq_info->queue_id = queue_id;
	if (queue_id < nb_txq) {
		txq_info->ring_len = txq->nb_tx_desc;
		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;

i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
	uint32_t max_pkt_size,
	struct i40e_rx_queue *rxq)
	rxq_info->vsi_id = vsi_id;
	rxq_info->queue_id = queue_id;
	rxq_info->max_pkt_size = max_pkt_size;
	if (queue_id < nb_rxq) {
		rxq_info->ring_len = rxq->nb_rx_desc;
		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
		rxq_info->databuffer_size =
			(rte_pktmbuf_data_room_size(rxq->mp) -
			RTE_PKTMBUF_HEADROOM);
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_vqci;
	struct virtchnl_queue_pair_info *vc_qpi;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);

	memset(buff, 0, sizeof(buff));
	vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
	vc_vqci->vsi_id = vf->vsi_res->vsi_id;
	vc_vqci->num_queue_pairs = nb_qp;

	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
			vf->max_pkt_len, rxq[i]);

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.in_args = (uint8_t *)vc_vqci;
	args.in_args_size = size;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"VIRTCHNL_OP_CONFIG_VSI_QUEUES");
i40evf_config_irq_map(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;
	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
		sizeof(struct virtchnl_vector_map)];
	struct virtchnl_irq_map_info *map_info;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle))
		vector_id = I40E_RX_VEC_START;
		vector_id = I40E_MISC_VEC_ID;

	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
	map_info->num_vectors = 1;
	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
	/* Always use the default dynamic MSI-X interrupt */
	map_info->vecmap[0].vector_id = vector_id;
	/* Don't map any tx queue */
	map_info->vecmap[0].txq_map = 0;
	map_info->vecmap[0].rxq_map = 0;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		map_info->vecmap[0].rxq_map |= 1 << i;
		if (rte_intr_dp_is_en(intr_handle))
			intr_handle->intr_vec[i] = vector_id;

	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.in_args = (u8 *)cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
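
/* In virtchnl_queue_select, rx_queues/tx_queues are bitmaps with one bit
 * per queue id, so each call below enables or disables a single queue of
 * the VSI.
 */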
i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_queue_select queue_select;
	struct vf_cmd_info args;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = vf->vsi_res->vsi_id;
		queue_select.rx_queues |= 1 << qid;
		queue_select.tx_queues |= 1 << qid;

		args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
		args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
	args.in_args = (u8 *)&queue_select;
	args.in_args_size = sizeof(queue_select);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
			isrx ? "RX" : "TX", qid, on ? "on" : "off");
i40evf_start_queues(struct rte_eth_dev *dev)
	struct rte_eth_dev_data *dev_data = dev->data;
	struct i40e_rx_queue *rxq;
	struct i40e_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev_data->rx_queues[i];
		if (rxq->rx_deferred_start)
		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev_data->tx_queues[i];
		if (txq->tx_deferred_start)
		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);

i40evf_stop_queues(struct rte_eth_dev *dev)

	/* Stop TX queues first */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);

	/* Then stop RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
i40evf_add_mac_addr(struct rte_eth_dev *dev,
	struct ether_addr *addr,
	__rte_unused uint32_t index,
	__rte_unused uint32_t pool)
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
		sizeof(struct virtchnl_ether_addr)];
	struct vf_cmd_info args;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
			addr->addr_bytes[0], addr->addr_bytes[1],
			addr->addr_bytes[2], addr->addr_bytes[3],
			addr->addr_bytes[4], addr->addr_bytes[5]);
		return I40E_ERR_INVALID_MAC_ADDR;

	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = 1;
	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		sizeof(addr->addr_bytes));

	args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
	args.in_args = cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command "
			"OP_ADD_ETHER_ADDRESS");
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
	struct ether_addr *addr)
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
		sizeof(struct virtchnl_ether_addr)];
	struct vf_cmd_info args;

	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
			addr->addr_bytes[0], addr->addr_bytes[1],
			addr->addr_bytes[2], addr->addr_bytes[3],
			addr->addr_bytes[4], addr->addr_bytes[5]);

	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = 1;
	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		sizeof(addr->addr_bytes));

	args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
	args.in_args = cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command "
			"OP_DEL_ETHER_ADDRESS");

i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *addr;

	addr = &data->mac_addrs[index];

	i40evf_del_mac_addr_by_addr(dev, addr);
i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_queue_select q_stats;
	struct vf_cmd_info args;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = vf->vsi_res->vsi_id;
	args.ops = VIRTCHNL_OP_GET_STATS;
	args.in_args = (u8 *)&q_stats;
	args.in_args_size = sizeof(q_stats);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");

	*pstats = (struct i40e_eth_stats *)args.out_buffer;
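
/* The hardware statistics counters handled below are 48 bits (or 32 bits)
 * wide and wrap around. The helpers subtract the stored offset from the
 * raw value and, when the raw value has already wrapped past the offset,
 * add back one full counter period before subtracting.
 */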
i40evf_stat_update_48(uint64_t *offset,
	if (*stat >= *offset)
		*stat = *stat - *offset;
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);

	*stat &= I40E_48_BIT_MASK;

i40evf_stat_update_32(uint64_t *offset,
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
i40evf_update_stats(struct i40e_vsi *vsi,
	struct i40e_eth_stats *nes)
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;

	i40evf_stat_update_48(&oes->rx_bytes,
	i40evf_stat_update_48(&oes->rx_unicast,
	i40evf_stat_update_48(&oes->rx_multicast,
	i40evf_stat_update_48(&oes->rx_broadcast,
	i40evf_stat_update_32(&oes->rx_discards,
	i40evf_stat_update_32(&oes->rx_unknown_protocol,
		&nes->rx_unknown_protocol);
	i40evf_stat_update_48(&oes->tx_bytes,
	i40evf_stat_update_48(&oes->tx_unicast,
	i40evf_stat_update_48(&oes->tx_multicast,
	i40evf_stat_update_48(&oes->tx_broadcast,
	i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_eth_stats *pstats = NULL;

	/* read stat values to clear hardware registers */
	i40evf_query_stats(dev, &pstats);

	/* set stats offset based on current values */
	vf->vsi.eth_stats_offset = *pstats;
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned limit)

	if (xstats_names != NULL)
		for (i = 0; i < I40EVF_NB_XSTATS; i++) {
			snprintf(xstats_names[i].name,
				sizeof(xstats_names[i].name),
				"%s", rte_i40evf_stats_strings[i].name);
	return I40EVF_NB_XSTATS;

static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
	struct rte_eth_xstat *xstats, unsigned n)
	struct i40e_eth_stats *pstats = NULL;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_vsi *vsi = &vf->vsi;

	if (n < I40EVF_NB_XSTATS)
		return I40EVF_NB_XSTATS;

	ret = i40evf_query_stats(dev, &pstats);

	i40evf_update_stats(vsi, pstats);

	/* loop over xstats array and values from pstats */
	for (i = 0; i < I40EVF_NB_XSTATS; i++) {
		xstats[i].value = *(uint64_t *)(((char *)pstats) +
			rte_i40evf_stats_strings[i].offset);

	return I40EVF_NB_XSTATS;
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_vlan_filter_list *vlan_list;
	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
	struct vf_cmd_info args;

	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
	vlan_list->vsi_id = vf->vsi_res->vsi_id;
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = vlanid;

	args.ops = VIRTCHNL_OP_ADD_VLAN;
	args.in_args = (u8 *)&cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");

i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_vlan_filter_list *vlan_list;
	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
	struct vf_cmd_info args;

	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
	vlan_list->vsi_id = vf->vsi_res->vsi_id;
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = vlanid;

	args.ops = VIRTCHNL_OP_DEL_VLAN;
	args.in_args = (u8 *)&cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
static const struct rte_pci_id pci_id_i40evf_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
	{ .vendor_id = 0, /* sentinel */ },
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
	struct rte_eth_link *link)
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
i40evf_disable_irq0(struct i40e_hw *hw)
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	I40EVF_WRITE_FLUSH(hw);

i40evf_enable_irq0(struct i40e_hw *hw)
	/* Enable admin queue interrupt trigger */
	i40evf_disable_irq0(hw);
	val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
	val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
	       I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);

	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		I40E_VFINT_DYN_CTL01_INTENA_MASK |
		I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
		I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);

	I40EVF_WRITE_FLUSH(hw);
i40evf_check_vf_reset_done(struct i40e_hw *hw)

	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
		reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)

	if (i >= MAX_RESET_WAIT_CNT)
i40evf_reset_vf(struct i40e_hw *hw)

	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");

	/**
	 * After the VF reset command is issued to the PF, the PF won't
	 * necessarily reset the VF; it depends on what state the PF is in.
	 * If the PF is not initialized yet, it won't perform the VF reset;
	 * otherwise it will try to reset. Even when the VF is reset, the PF
	 * first sets I40E_VFGEN_RSTAT to COMPLETE, then waits 10 ms and sets
	 * it to ACTIVE. During that window the VF may miss the moment that
	 * COMPLETE is set, so the VF tries to wait a long time here.
	 */
	ret = i40evf_check_vf_reset_done(hw);
		PMD_INIT_LOG(ERR, "VF is still resetting");
i40evf_init_vf(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);

	vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	vf->dev_data = dev->data;
	err = i40e_set_mac_type(hw);
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);

	err = i40evf_check_vf_reset_done(hw);

	i40e_init_adminq_parameter(hw);
	err = i40e_init_adminq(hw);
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);

	/* Reset VF and wait until it's complete */
	if (i40evf_reset_vf(hw)) {
		PMD_INIT_LOG(ERR, "reset NIC failed");

	/* VF reset, shutdown admin queue and initialize again */
	if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");

	i40e_init_adminq_parameter(hw);
	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "init_adminq failed");

	vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");

	if (i40evf_check_api_version(dev) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");

	if (i40evf_get_vf_resource(dev) != 0) {
		PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vf->vf_res->num_vsis; i++) {
		if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			vf->vsi_res = &vf->vf_res->vsi_res[i];

		PMD_INIT_LOG(ERR, "no LAN VSI found");

	if (hw->mac.type == I40E_MAC_X722_VF)
		vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
	vf->vsi.vsi_id = vf->vsi_res->vsi_id;

	switch (vf->vsi_res->vsi_type) {
	case VIRTCHNL_VSI_SRIOV:
		vf->vsi.type = I40E_VSI_SRIOV;
		vf->vsi.type = I40E_VSI_TYPE_UNKNOWN;

	vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
	vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Store the MAC address configured by the host, or generate a random one */
	if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
		eth_random_addr(hw->mac.addr); /* Generate a random one */

	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		(I40E_ITR_INDEX_DEFAULT <<
		I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
		I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
	I40EVF_WRITE_FLUSH(hw);

	rte_free(vf->vf_res);
	rte_free(vf->aq_resp);
	i40e_shutdown_adminq(hw); /* ignore error */
i40evf_uninit_vf(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 0)
		i40evf_dev_close(dev);
	rte_free(vf->vf_res);
	rte_free(vf->aq_resp);
i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
	__rte_unused uint16_t msglen)
	struct virtchnl_pf_event *pf_msg =
		(struct virtchnl_pf_event *)msg;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	switch (pf_msg->event) {
	case VIRTCHNL_EVENT_RESET_IMPENDING:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
	case VIRTCHNL_EVENT_LINK_CHANGE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
		vf->link_up = pf_msg->event_data.link_event.link_status;
		vf->link_speed = pf_msg->event_data.link_event.link_speed;
	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
		PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
i40evf_handle_aq_msg(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info info;
	uint16_t pending, aq_opc;
	enum virtchnl_ops msg_opc;
	enum i40e_status_code msg_ret;

	info.buf_len = I40E_AQ_BUF_SZ;
		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
	info.msg_buf = vf->aq_resp;

		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"

		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
		/* For a message sent from the PF to the VF, the opcode is
		 * stored in cookie_high of struct i40e_aq_desc, while the
		 * return error code is stored in cookie_low. This is done by
		 * i40e_aq_send_msg_to_vf in the PF driver.
		 */
		msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
			info.desc.cookie_high);
		msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
			info.desc.cookie_low);
		case i40e_aqc_opc_send_msg_to_vf:
			if (msg_opc == VIRTCHNL_OP_EVENT)
				i40evf_handle_pf_event(dev, info.msg_buf,
			/* check that the message read is the expected one */
			if (msg_opc == vf->pend_cmd) {
				vf->cmd_retval = msg_ret;
				/* prevent compiler reordering */
				rte_compiler_barrier();
				PMD_DRV_LOG(ERR, "command mismatch,"
					"expect %u, get %u",
					vf->pend_cmd, msg_opc);
			PMD_DRV_LOG(DEBUG, "adminq response is received,"
				" opcode = %d", msg_opc);
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt. Only the admin queue interrupt is processed by the VF.
 *  Pointer to the interrupt handle.
 *  The address of the parameter (struct rte_eth_dev *) registered before.
i40evf_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	i40evf_disable_irq0(hw);

	/* read out interrupt causes */
	icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);

	/* No interrupt event indicated */
	if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
		PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");

	if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
		i40evf_handle_aq_msg(dev);

	/* Link Status Change interrupt */
	if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
		PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"

	i40evf_enable_irq0(hw);
i40evf_dev_init(struct rte_eth_dev *eth_dev)
		= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);

	i40e_set_default_ptype_table(eth_dev);
	i40e_set_default_pctype_table(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;

	if (i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
		i40evf_dev_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	i40evf_enable_irq0(hw);

	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
		ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			" store MAC addresses",
			ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		&eth_dev->data->mac_addrs[0]);
i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (i40evf_uninit_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), i40evf_dev_init);

static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_i40evf_pmd = {
	.id_table = pci_id_i40evf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40evf_pci_probe,
	.remove = eth_i40evf_pci_remove,

RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
i40evf_dev_configure(struct rte_eth_dev *dev)
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	/* Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions, we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	/* For non-DPDK PF drivers, the VF cannot disable HW CRC stripping;
	 * it is implicitly enabled by the PF.
	 */
	if (!conf->rxmode.hw_strip_crc) {
		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
		if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
		    (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
			/* Peer is running non-DPDK PF driver. */
			PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");

	return i40evf_init_vlan(dev);

i40evf_init_vlan(struct rte_eth_dev *dev)
	/* Apply vlan offload setting */
	return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	/* Vlan stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.hw_vlan_strip)
			i40evf_enable_vlan_strip(dev);
			i40evf_disable_vlan_strip(dev);
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct i40e_rx_queue *rxq;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		err = i40e_alloc_rx_queue_mbufs(rxq);
			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");

		/* Init the RX tail register. */
		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
		I40EVF_WRITE_FLUSH(hw);

		/* Ready to switch the queue on */
		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;

i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct i40e_rx_queue *rxq;

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",

		i40e_rx_queue_release_mbufs(rxq);
		i40e_reset_rx_queue(rxq);
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {

		/* Ready to switch the queue on */
		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;

i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct i40e_tx_queue *txq;

	if (tx_queue_id < dev->data->nb_tx_queues) {
		txq = dev->data->tx_queues[tx_queue_id];

		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",

		i40e_tx_queue_release_mbufs(txq);
		i40e_reset_tx_queue(txq);
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;

i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

		ret = i40evf_add_vlan(dev, vlan_id);
		ret = i40evf_del_vlan(dev, vlan_id);
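
/* The function below programs one Rx queue: it sets the tail register,
 * derives the buffer and maximum packet lengths from the mempool,
 * validates them against the jumbo-frame setting, and enables scattered
 * Rx when a frame may span more than one buffer.
 */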
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
		RTE_PKTMBUF_HEADROOM);
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
		dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
				(uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;

		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
				(uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;

	if (dev_data->dev_conf.rxmode.enable_scatter ||
	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
i40evf_rx_init(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret = I40E_SUCCESS;
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;

	i40evf_config_rss(vf);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
		ret = i40evf_rxq_init(dev, rxq[i]);
		if (ret != I40E_SUCCESS)
	if (ret == I40E_SUCCESS)
		i40e_set_rx_function(dev);

i40evf_tx_init(struct rte_eth_dev *dev)
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);

	i40e_set_tx_function(dev);
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
			I40E_VFINT_DYN_CTL01,
			I40E_VFINT_DYN_CTL01_INTENA_MASK |
			I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);

	I40EVF_WRITE_FLUSH(hw);

i40evf_disable_queues_intr(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);

	I40EVF_WRITE_FLUSH(hw);

i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			I40E_VFINT_DYN_CTL01_INTENA_MASK |
			I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			(0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
			I40E_VFINT_DYN_CTLN1(msix_intr -
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
			(0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
			I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));

	I40EVF_WRITE_FLUSH(hw);

	rte_intr_enable(&pci_dev->intr_handle);

i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
		I40E_VFINT_DYN_CTLN1(msix_intr -

	I40EVF_WRITE_FLUSH(hw);
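
/* The function below adds or removes all configured MAC addresses in
 * batches: each virtchnl_ether_addr_list message is kept within
 * I40E_AQ_BUF_SZ, so the address table is walked in chunks until every
 * entry has been sent.
 */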
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct ether_addr *addr;
	struct vf_cmd_info args;

	len = sizeof(struct virtchnl_ether_addr_list);
	for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
		if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
		len += sizeof(struct virtchnl_ether_addr);
		if (len >= I40E_AQ_BUF_SZ) {

	list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
		PMD_DRV_LOG(ERR, "fail to allocate memory");

	for (i = begin; i < next_begin; i++) {
		addr = &dev->data->mac_addrs[i];
		if (is_zero_ether_addr(addr))
		rte_memcpy(list->list[j].addr, addr->addr_bytes,
			sizeof(addr->addr_bytes));
		PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
			addr->addr_bytes[0], addr->addr_bytes[1],
			addr->addr_bytes[2], addr->addr_bytes[3],
			addr->addr_bytes[4], addr->addr_bytes[5]);

	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = j;
	args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
		VIRTCHNL_OP_DEL_ETH_ADDR;
	args.in_args = (uint8_t *)list;
	args.in_args_size = len;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			add ? "OP_ADD_ETHER_ADDRESS" :
			"OP_DEL_ETHER_ADDRESS");

	} while (begin < I40E_NUM_MACADDR_MAX);
i40evf_dev_start(struct rte_eth_dev *dev)
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
		dev->data->nb_tx_queues);
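	/* Queues are configured at the PF in Rx/Tx pairs, so the larger of
	 * the two counts is reported as the number of queue pairs.
	 */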
	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", dev->data->nb_rx_queues);

	if (i40evf_rx_init(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do RX init");

	i40evf_tx_init(dev);

	if (i40evf_configure_vsi_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");

	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");

	/* Set all mac addrs */
	i40evf_add_del_all_mac_addr(dev, TRUE);

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");

	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
	 * interrupt is mapped to VFIO vector 0 in i40evf_dev_init().
	 * If the previous VFIO interrupt mapping set in i40evf_dev_init()
	 * is not cleared, it will fail when rte_intr_enable() tries to map
	 * Rx queue interrupts to other VFIO vectors.
	 * So clear the uio/vfio intr/eventfd first to avoid failure.
	 */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);

	i40evf_enable_queues_intr(dev);

	i40evf_add_del_all_mac_addr(dev, FALSE);
i40evf_dev_stop(struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)

	i40evf_stop_queues(dev);
	i40evf_disable_queues_intr(dev);
	i40e_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;

	/* remove all mac addrs */
	i40evf_add_del_all_mac_addr(dev, FALSE);
	hw->adapter_stopped = 1;
i40evf_dev_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
	struct rte_eth_link new_link;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	/*
	 * The DPDK PF host provides an interface to acquire the link status,
	 * while the Linux driver does not.
	 */

	/* Linux driver PF host */
	switch (vf->link_speed) {
	case I40E_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
	case I40E_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
	case I40E_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
	case I40E_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
	case I40E_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
	case I40E_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		new_link.link_speed = ETH_SPEED_NUM_100M;

	/* full duplex only */
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
	/* autoneg is on unless a fixed speed was requested */
	new_link.link_autoneg =
		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);

	i40evf_dev_atomic_write_link_status(dev, &new_link);
static void
i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If already enabled, just return */
	if (vf->promisc_unicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
	if (ret == 0)
		vf->promisc_unicast_enabled = TRUE;
}

static void
i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If already disabled, just return */
	if (!vf->promisc_unicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
	if (ret == 0)
		vf->promisc_unicast_enabled = FALSE;
}

static void
i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If already enabled, just return */
	if (vf->promisc_multicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
	if (ret == 0)
		vf->promisc_multicast_enabled = TRUE;
}

static void
i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If already disabled, just return */
	if (!vf->promisc_multicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
	if (ret == 0)
		vf->promisc_multicast_enabled = FALSE;
}
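
/* Report device capabilities. Queue limits come from the VSI resources
 * negotiated with the PF; offload flags and descriptor limits are fixed
 * properties of the hardware.
 */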
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
		sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}
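
/* Fold the VSI counters retrieved from the PF into rte_eth_stats:
 * ipackets/opackets aggregate unicast, multicast and broadcast, and Rx
 * discards are accounted as imissed.
 */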
static int
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int ret;
	struct i40e_eth_stats *pstats = NULL;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_vsi *vsi = &vf->vsi;

	ret = i40evf_query_stats(dev, &pstats);
	if (ret == 0) {
		i40evf_update_stats(vsi, pstats);

		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
				  pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
				  pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}

	return ret;
}
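
/* Release all device resources: stop the port, free the queues, reset the
 * VF through the PF, shut down the admin queue and detach the interrupt
 * callback.
 */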
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	i40evf_dev_stop(dev);
	i40e_dev_free_queues(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     i40evf_dev_interrupt_handler, dev);
	i40evf_disable_irq0(hw);
}

/*
 * Reset VF device only to re-initialize resources in PMD layer
 */
static int
i40evf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = i40evf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = i40evf_dev_init(dev);

	return ret;
}
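
/* RSS lookup-table helpers. VFs that advertise the RSS AQ capability
 * (e.g. X722) access the table through admin queue commands; on other
 * devices it is read and written directly through the VFQF_HLUT
 * registers, four 8-bit queue indexes per 32-bit register.
 */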
static int
i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
	}

	return 0;
}

static int
i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_vf *vf;
	struct i40e_hw *hw;
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	vf = I40E_VSI_TO_VF(vsi);
	hw = I40E_VSI_TO_HW(vsi);

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
		I40EVF_WRITE_FLUSH(hw);
	}

	return 0;
}
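
/* RETA update is read-modify-write: fetch the current lookup table,
 * overwrite only the entries selected in the caller's mask, then write
 * the whole table back.
 */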
static int
i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match what the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}

static int
i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t i, idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match what the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
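
/* RSS hash key helpers. Like the LUT, the (I40E_VFQF_HKEY_MAX_INDEX + 1)
 * 32-bit words of key material go through the admin queue on AQ-capable
 * VFs and through the VFQF_HKEY registers otherwise.
 */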
static int
i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
		sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		struct i40e_aqc_get_set_rss_key_data *key_dw =
			(struct i40e_aqc_get_set_rss_key_data *)key;

		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
		if (ret)
			PMD_INIT_LOG(ERR, "Failed to configure RSS key "
				     "via AQ");
	} else {
		uint32_t *hash_key = (uint32_t *)key;
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
		I40EVF_WRITE_FLUSH(hw);
	}

	return ret;
}

static int
i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
			(struct i40e_aqc_get_set_rss_key_data *)key);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
			return ret;
		}
	} else {
		uint32_t *key_dw = (uint32_t *)key;
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
	}
	*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}
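
/* Program the key and the hash-enable (HENA) bits: the 64-bit mask of
 * flow types to hash on is split across the two 32-bit VFQF_HENA
 * registers.
 */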
static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	uint64_t hena;
	int ret;

	ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (ret)
		return ret;

	hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}

static void
i40evf_disable_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);

	/* Clearing both HENA registers disables hashing entirely */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0);
	I40EVF_WRITE_FLUSH(hw);
}
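
/* Build the default RSS configuration when the application requests RSS:
 * LUT entries are assigned round-robin over the enabled Rx queues and
 * packed four 8-bit entries per HLUT register (e.g. with four queues the
 * first register is written as 0x00010203). If no key, or a too-short
 * key, is supplied, a random default key is generated.
 */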
static int
i40evf_config_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
	uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
	uint16_t num;

	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "RSS not configured");
		return 0;
	}

	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
	/* Fill out the look up table */
	for (i = 0, j = 0; i < nb_q; i++, j++) {
		if (j >= num)
			j = 0;
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
	}

	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "No hash flag is set");
		return 0;
	}

	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
			sizeof(uint32_t);
	}

	return i40evf_hw_rss_hash_set(vf, &rss_conf);
}
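
/* Runtime hash updates may only change which flow types are hashed;
 * turning RSS itself on or off after initialization is rejected with
 * -EINVAL.
 */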
static int
i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask;
	uint64_t hena;

	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;

	if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -EINVAL;
		return 0; /* Nothing to do */
	}

	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -EINVAL;

	return i40evf_hw_rss_hash_set(vf, rss_conf);
}

static int
i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			     struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t hena;

	i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
			   &rss_conf->rss_key_len);

	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
	rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena);

	return 0;
}
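
/* Validate and apply a new MTU. The frame size compared against the
 * hardware limit includes the L2 overhead (I40E_ETH_OVERHEAD: Ethernet
 * header, CRC and room for two VLAN tags), and jumbo-frame mode is
 * toggled when the frame size crosses ETHER_MAX_LEN.
 */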
static int
i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = vf->dev_data;
	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
	int ret = 0;

	/* check if mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
		return -EINVAL;

	/* mtu setting is forbidden if port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev_data->dev_conf.rxmode.jumbo_frame = 0;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}

static void
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return;
	}

	if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
		return;

	/* The default MAC is assigned by the PF; the VF may not change it */
	if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
		return;

	i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);

	i40evf_add_mac_addr(dev, mac_addr, 0, 0);
}