/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MAX_VFTA     (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808

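/*
 * Mailbox messages are arrays of 32-bit words; the sizes above are reply
 * lengths in words. Most requests are answered with a single word (the
 * opcode plus ACK/NACK bits), while IXGBE_VF_GET_QUEUES gets a five-word
 * reply (see ixgbe_rcv_msg_from_vf() below).
 */
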
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	return eth_dev->pci_dev->max_vfs;
}

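/*
 * Generate a random, locally administered MAC address for every VF.
 * The PF keeps these as the VFs' permanent addresses and hands them out
 * in the reply to a VF reset request.
 */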
static inline int
ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	intr->mask |= IXGBE_EICR_MAILBOX;
	return 0;
}

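/*
 * PF-side SR-IOV initialisation: allocate the per-VF state array and
 * carve the queue pools according to the VF count. With 32-63 VFs the
 * device runs in 64-pool mode (2 queues per pool), with 16-31 VFs in
 * 32-pool mode (4 queues per pool), otherwise in 16-pool mode (8 queues
 * per pool). The PF itself takes the pool right after the VFs.
 */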
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	*vfinfo = rte_zmalloc("vf_info",
			      sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;

	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
		(uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init mailbox parameters */
	hw->mbx.ops.init_params(hw);

	/* set mailbox interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);
}

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;

	PMD_INIT_FUNC_TRACE();
	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	rte_free(*vfinfo);
	*vfinfo = NULL;
}

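/*
 * Reserve one ethertype filter entry for the flow-control ethertype
 * (0x8808) with the Tx anti-spoof bit set, so PAUSE frames forged by a
 * VF are dropped instead of reaching the wire.
 */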
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		RTE_LOG(INFO, PMD,
			"ether type anti-spoofing is not supported.\n");
		return;
	}

	/* occupy an entry of the ether type filter table */
	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
			filter_info->ethertype_filters[i] =
				IXGBE_ETHERTYPE_FLOW_CTRL;
			break;
		}
	}
	if (i == IXGBE_MAX_ETQF_FILTERS) {
		RTE_LOG(ERR, PMD,
			"Cannot find an unused ether type filter entry for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_TX_ANTISPOOF |
			 IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}

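/*
 * Program the hardware for SR-IOV operation: enable VMDq with the PF as
 * the default pool, open the Rx/Tx pool-enable bits (VFRE/VFTE) for the
 * pools the PF owns, set the VT mode in GCR_EXT and GPIE to match the
 * pool layout chosen in ixgbe_pf_host_init(), and open all VLAN filters.
 */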
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/*
	 * Enable pools reserved to the PF only: the slot holding pool
	 * vf_num gets all bits from vf_num upward; the other slot gets
	 * all ones when it lies entirely above the VF pools (vfre_slot
	 * is 0, so vfre_slot - 1 wraps to 0xFFFFFFFF) and all zeros
	 * when it holds only VF pools.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control enables VMDq loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}

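/*
 * Mirror the PF's promiscuous/all-multicast state into the global Rx
 * filter control (FCTRL) and into the VMOLR of the PF's own pool, then
 * re-apply the VLAN strip setting to all queues.
 */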
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else if (dev_data->all_multicast) {
		fctrl |= IXGBE_FCTRL_MPE;
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		vmolr |= IXGBE_VMOLR_ROMPE;
	}

	/* 82598 has no per-pool VMOLR */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ixgbe_vlan_hw_strip_enable_all(dev);
	else
		ixgbe_vlan_hw_strip_disable_all(dev);
}

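/*
 * Per-VF reset handling shared by FLR detection and the mailbox reset
 * request: restore the VF's VMOLR defaults, clear its port VLAN (VMVIR),
 * drop its multicast hash list and its RAR entry, and refresh the PF
 * Rx mode afterwards.
 */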
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
		  IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}

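/*
 * Complete a VF reset on the PF side: re-enable the VF's Tx and Rx pool
 * bits, force per-queue drop (QDE) on every queue of the VF's pool so a
 * stalled VF cannot wedge the Tx switch, and count its spoofed packets
 * in SSVPC.
 */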
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf);
	vmolr |= IXGBE_VMOLR_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	return 0;
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);
	vmolr &= ~IXGBE_VMOLR_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	return 0;
}

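/*
 * Handle the IXGBE_VF_RESET mailbox request. The reply piggybacks the
 * VF's permanent MAC address in msgbuf[1..2] and the multicast filter
 * type in msgbuf[3] (IXGBE_VF_PERMADDR_MSG_LEN words in total), so the
 * VF can program itself without further round trips.
 */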
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so the VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -1;
}

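/*
 * Handle IXGBE_VF_SET_MULTICAST: the VF sends up to
 * IXGBE_MAX_VF_MC_ENTRIES 12-bit multicast hash values; the upper 7 bits
 * select one of the 128 MTA registers, the lower 5 bits the bit within it.
 */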
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
			& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	return 0;
}

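/*
 * Handle IXGBE_VF_SET_VLAN: the IXGBE_VT_MSGINFO bits carry add/remove,
 * msgbuf[1] the VLAN id. The per-VF vlan_count lets the PF track whether
 * a VF still has VLAN filters programmed.
 */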
static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		>> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;

	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}

static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Only X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if (max_frame < ETHER_MIN_LEN ||
	    max_frame > ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	/* grow MAXFRS if the requested MTU exceeds the current setting */
	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}

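/*
 * Handle IXGBE_VF_API_NEGOTIATE: record the mailbox API version the VF
 * asked for (1.0, 1.1 or 1.2). Version 1.2 is what later gates the
 * IXGBE_VF_UPDATE_XCAST_MODE request.
 */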
static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		RTE_LOG(ERR, PMD,
			"Invalid API version %u requested by VF %d\n",
			api_version, vf);
		return -1;
	}
}

static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Verify that the PF supports the mbox API version the VF uses */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/*
	 * FIX ME: fill msgbuf[IXGBE_VF_TRANS_VLAN] if it is needed for
	 * VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
	 */

	return 0;
}

static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	bool enable = !!msgbuf[1];	/* msgbuf carries the enable flag */

	/* the xcast mode request is only part of mbox API 1.2 */
	if (vfinfo[vf].api_version != ixgbe_mbox_api_12)
		return -1;

	if (enable)
		return ixgbe_enable_vf_mc_promisc(dev, vf);
	return ixgbe_disable_vf_mc_promisc(dev, vf);
}

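/*
 * Central dispatcher for VF-to-PF mailbox traffic: read the message,
 * let the application veto or allow it through the RTE_ETH_EVENT_VF_MBOX
 * callback, execute the request, and answer with ACK or NACK plus the
 * CTS bit.
 */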
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param cb_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing with a message that has already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * initialise the structure sent to the user application;
	 * it returns its response in the retval field
	 */
	cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	cb_param.vfid = vf;
	cb_param.msg_type = msgbuf[0] & 0xFFFF;
	cb_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					      &cb_param);
		return ret;
	}

	/*
	 * ask the user application whether we are allowed to perform the
	 * requested function: RTE_PMD_IXGBE_MB_EVENT_PROCEED means business
	 * as usual; RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK skips the request and
	 * sends an ACK to the VF; any other value skips it and sends a NACK
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);

	retval = cb_param.retval;

	/* check & process the VF-to-PF mailbox message */
	switch (msgbuf[0] & 0xFFFF) {
	case IXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	/* NACK anything a VF sends before it has completed a reset */
	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

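/*
 * Polled from the PF interrupt path once ixgbe_mb_intr_setup() has
 * unmasked the mailbox bit in EICR: check every VF for a pending
 * function-level reset, mailbox message or ack.
 */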
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function-level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}