4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "base/i40e_dcb.h"
39 #include "i40e_ethdev.h"
41 #include "i40e_rxtx.h"
42 #include "rte_pmd_i40e.h"
/*
 * Ping (notify) a single VF with the current link status of the port.
 * NOTE(review): interior lines are elided in this view; validation failures
 * presumably return negative errno — confirm against the full source.
 */
45 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
47 struct rte_eth_dev *dev;
50 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52 dev = &rte_eth_devices[port];
54 if (!is_i40e_supported(dev))
/* Only meaningful on i40e hardware. */
57 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* VF index must be in range and the VF array must exist. */
59 if (vf >= pf->vf_num || !pf->vfs) {
60 PMD_DRV_LOG(ERR, "Invalid argument.");
/* Reuse the link-status notification path to "ping" the VF. */
64 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
/*
 * Enable/disable MAC anti-spoof checking for a VF's VSI.
 * Validates port/VF, short-circuits when the requested state already
 * matches the VSI security flags, then updates sec_flags and pushes the
 * VSI context to firmware via i40e_aq_update_vsi_params().
 */
70 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
72 struct rte_eth_dev *dev;
76 struct i40e_vsi_context ctxt;
79 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
81 dev = &rte_eth_devices[port];
83 if (!is_i40e_supported(dev))
86 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
88 if (vf_id >= pf->vf_num || !pf->vfs) {
89 PMD_DRV_LOG(ERR, "Invalid argument.");
93 vsi = pf->vfs[vf_id].vsi;
95 PMD_DRV_LOG(ERR, "Invalid VSI.");
99 /* Check if it has been already on or off */
100 if (vsi->info.valid_sections &
101 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
/* Security section already valid: compare current MAC-check flag. */
103 if ((vsi->info.sec_flags &
104 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
105 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
106 return 0; /* already on */
108 if ((vsi->info.sec_flags &
109 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
110 return 0; /* already off */
/* Mark only the security section as valid for this AQ update. */
114 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
116 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
118 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
120 memset(&ctxt, 0, sizeof(ctxt));
121 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
122 ctxt.seid = vsi->seid;
124 hw = I40E_VSI_TO_HW(vsi);
125 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
126 if (ret != I40E_SUCCESS) {
128 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Walk the VSI's VFTA (VLAN filter table bitmap) and, for every VLAN id
 * whose bit is set, issue an AQ add (add != 0) or remove (add == 0)
 * VLAN-filter command against this VSI.
 */
135 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
139 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
140 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
143 for (j = 0; j < I40E_VFTA_SIZE; j++) {
147 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
/* Skip VLAN ids whose bit is clear in this 32-bit word. */
148 if (!(vsi->vfta[j] & (1 << k)))
/* Reconstruct the VLAN id from word index and bit position. */
151 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
155 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
157 ret = i40e_aq_add_vlan(hw, vsi->seid,
158 &vlan_data, 1, NULL);
160 ret = i40e_aq_remove_vlan(hw, vsi->seid,
161 &vlan_data, 1, NULL);
162 if (ret != I40E_SUCCESS) {
164 "Failed to add/rm vlan filter");
/*
 * Enable/disable VLAN anti-spoof checking for a VF's VSI.
 * When VLAN filtering is not already active, the existing VFTA entries
 * are programmed (or removed) to match the new anti-spoof state before
 * the security flags are pushed to firmware.
 */
174 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
176 struct rte_eth_dev *dev;
178 struct i40e_vsi *vsi;
180 struct i40e_vsi_context ctxt;
183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
185 dev = &rte_eth_devices[port];
187 if (!is_i40e_supported(dev))
190 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
192 if (vf_id >= pf->vf_num || !pf->vfs) {
193 PMD_DRV_LOG(ERR, "Invalid argument.");
197 vsi = pf->vfs[vf_id].vsi;
199 PMD_DRV_LOG(ERR, "Invalid VSI.");
203 /* Check if it has been already on or off */
204 if (vsi->vlan_anti_spoof_on == on)
205 return 0; /* already on or off */
207 vsi->vlan_anti_spoof_on = on;
/* Only (re)program hardware VLAN filters if filtering was off. */
208 if (!vsi->vlan_filter_on) {
209 ret = i40e_add_rm_all_vlan_filter(vsi, on);
211 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
/* Update only the security section of the VSI context. */
216 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
218 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
220 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
222 memset(&ctxt, 0, sizeof(ctxt));
223 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
224 ctxt.seid = vsi->seid;
226 hw = I40E_VSI_TO_HW(vsi);
227 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
228 if (ret != I40E_SUCCESS) {
230 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Remove every MAC / MAC+VLAN filter currently attached to the VSI.
 * For per-VLAN filter types the full VLAN set for each MAC is resolved
 * first; a temporary macvlan array (one entry per VLAN) is allocated per
 * MAC and handed to i40e_remove_macvlan_filters().
 */
237 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
239 struct i40e_mac_filter *f;
240 struct i40e_macvlan_filter *mv_f;
242 enum rte_mac_filter_type filter_type;
243 int ret = I40E_SUCCESS;
246 /* remove all the MACs */
247 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
248 vlan_num = vsi->vlan_num;
249 filter_type = f->mac_info.filter_type;
/* MAC+VLAN filter types require at least one VLAN to operate on. */
250 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
251 filter_type == RTE_MACVLAN_HASH_MATCH) {
253 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
254 return I40E_ERR_PARAM;
256 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
257 filter_type == RTE_MAC_HASH_MATCH)
260 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
262 PMD_DRV_LOG(ERR, "failed to allocate memory");
263 return I40E_ERR_NO_MEMORY;
/* Seed one macvlan entry per VLAN with this MAC's address/type. */
266 for (i = 0; i < vlan_num; i++) {
267 mv_f[i].filter_type = filter_type;
268 rte_memcpy(&mv_f[i].macaddr,
269 &f->mac_info.mac_addr,
/* Fill in the actual VLAN ids for MAC+VLAN filter types. */
272 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
273 filter_type == RTE_MACVLAN_HASH_MATCH) {
274 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
275 &f->mac_info.mac_addr);
276 if (ret != I40E_SUCCESS) {
282 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
283 if (ret != I40E_SUCCESS) {
/*
 * Re-add every MAC / MAC+VLAN filter kept in the VSI's software mac_list
 * to the hardware.  Counterpart of i40e_vsi_rm_mac_filter(); used to
 * restore filters after an operation that required removing them.
 */
296 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
298 struct i40e_mac_filter *f;
299 struct i40e_macvlan_filter *mv_f;
301 int ret = I40E_SUCCESS;
304 /* restore all the MACs */
305 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
306 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
307 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
309 * If vlan_num is 0, that's the first time to add mac,
310 * set mask for vlan_id 0.
312 if (vsi->vlan_num == 0) {
313 i40e_set_vlan_filter(vsi, 0, 1);
316 vlan_num = vsi->vlan_num;
317 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
318 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
321 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
323 PMD_DRV_LOG(ERR, "failed to allocate memory");
324 return I40E_ERR_NO_MEMORY;
/* Build one macvlan entry per VLAN carrying this MAC address. */
327 for (i = 0; i < vlan_num; i++) {
328 mv_f[i].filter_type = f->mac_info.filter_type;
329 rte_memcpy(&mv_f[i].macaddr,
330 &f->mac_info.mac_addr,
/* Per-VLAN filter types also need the concrete VLAN ids. */
334 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
335 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
336 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
337 &f->mac_info.mac_addr);
338 if (ret != I40E_SUCCESS) {
344 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
345 if (ret != I40E_SUCCESS) {
/*
 * Enable/disable TX loopback (ALLOW_LB switch flag) on one VSI.
 * Requires FW >= v5.0 (except X722).  Because the AQ switch-section
 * update invalidates installed filters, all MAC and VLAN filters are
 * removed first and restored after the VSI parameters are updated.
 */
358 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
360 struct i40e_vsi_context ctxt;
367 hw = I40E_VSI_TO_HW(vsi);
369 /* Use the FW API if FW >= v5.0 */
370 if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
371 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
375 /* Check if it has been already on or off */
376 if (vsi->info.valid_sections &
377 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
379 if ((vsi->info.switch_id &
380 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
381 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
382 return 0; /* already on */
384 if ((vsi->info.switch_id &
385 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
386 return 0; /* already off */
390 /* remove all the MAC and VLAN first */
391 ret = i40e_vsi_rm_mac_filter(vsi);
393 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
/* VLAN filters exist in HW only when anti-spoof or filtering is on. */
396 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
399 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
/* Update only the switch section of the VSI context. */
404 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
406 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
408 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
410 memset(&ctxt, 0, sizeof(ctxt));
411 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
412 ctxt.seid = vsi->seid;
414 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
415 if (ret != I40E_SUCCESS) {
416 PMD_DRV_LOG(ERR, "Failed to update VSI params");
420 /* add all the MAC and VLAN back */
421 ret = i40e_vsi_restore_mac_filter(vsi);
424 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
425 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
/*
 * Port-level entry point: apply the TX loopback setting to the PF's
 * main VSI and then to each VF VSI (if any VFs are configured).
 */
434 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
436 struct rte_eth_dev *dev;
438 struct i40e_pf_vf *vf;
439 struct i40e_vsi *vsi;
443 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
445 dev = &rte_eth_devices[port];
447 if (!is_i40e_supported(dev))
450 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
452 /* setup PF TX loopback */
454 ret = i40e_vsi_set_tx_loopback(vsi, on);
458 /* setup TX loopback for all the VFs */
460 /* if no VF, do nothing. */
464 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
465 vf = &pf->vfs[vf_id];
468 ret = i40e_vsi_set_tx_loopback(vsi, on);
/*
 * Enable/disable unicast promiscuous mode on a VF's VSI through the
 * admin queue.  Standard port/VF/VSI validation precedes the AQ call.
 */
477 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
479 struct rte_eth_dev *dev;
481 struct i40e_vsi *vsi;
485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
487 dev = &rte_eth_devices[port];
489 if (!is_i40e_supported(dev))
492 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
494 if (vf_id >= pf->vf_num || !pf->vfs) {
495 PMD_DRV_LOG(ERR, "Invalid argument.");
499 vsi = pf->vfs[vf_id].vsi;
501 PMD_DRV_LOG(ERR, "Invalid VSI.");
505 hw = I40E_VSI_TO_HW(vsi);
507 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
509 if (ret != I40E_SUCCESS) {
511 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
/*
 * Enable/disable multicast promiscuous mode on a VF's VSI through the
 * admin queue.  Mirrors the unicast variant above.
 */
518 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
520 struct rte_eth_dev *dev;
522 struct i40e_vsi *vsi;
526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
528 dev = &rte_eth_devices[port];
530 if (!is_i40e_supported(dev))
533 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
535 if (vf_id >= pf->vf_num || !pf->vfs) {
536 PMD_DRV_LOG(ERR, "Invalid argument.");
540 vsi = pf->vfs[vf_id].vsi;
542 PMD_DRV_LOG(ERR, "Invalid VSI.");
546 hw = I40E_VSI_TO_HW(vsi);
548 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
550 if (ret != I40E_SUCCESS) {
552 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
/*
 * Assign a new default MAC address to a VF from the host side.
 * Validates the address, stores it in the VF state, and removes all
 * existing MAC filters from the VF's VSI.
 * NOTE(review): the new address presumably takes effect on the next VF
 * reset — confirm against the full source / API docs.
 */
559 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
560 struct ether_addr *mac_addr)
562 struct i40e_mac_filter *f;
563 struct rte_eth_dev *dev;
564 struct i40e_pf_vf *vf;
565 struct i40e_vsi *vsi;
/* Reject multicast/zero/otherwise invalid MAC addresses up front. */
569 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
574 dev = &rte_eth_devices[port];
576 if (!is_i40e_supported(dev))
579 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
581 if (vf_id >= pf->vf_num || !pf->vfs)
584 vf = &pf->vfs[vf_id];
587 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Record the host-assigned MAC in the per-VF state. */
591 ether_addr_copy(mac_addr, &vf->mac_addr);
593 /* Remove all existing mac */
594 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
595 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
597 PMD_DRV_LOG(WARNING, "Delete MAC failed");
602 /* Set vlan strip on/off for specific VF from host */
/*
 * Enable/disable RX VLAN stripping on a VF's VSI.  The !!on normalizes
 * any non-zero value to 1 before configuring the hardware.
 */
604 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
606 struct rte_eth_dev *dev;
608 struct i40e_vsi *vsi;
611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
613 dev = &rte_eth_devices[port];
615 if (!is_i40e_supported(dev))
618 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
620 if (vf_id >= pf->vf_num || !pf->vfs) {
621 PMD_DRV_LOG(ERR, "Invalid argument.");
625 vsi = pf->vfs[vf_id].vsi;
630 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
631 if (ret != I40E_SUCCESS) {
633 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
/*
 * Configure port-VLAN (PVID) insertion for a VF: set the PVID and
 * toggle the INSERT_PVID flag, then push the VLAN section of the VSI
 * context to firmware.  vlan_id == 0 with the flag cleared disables
 * insertion.
 */
639 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
642 struct rte_eth_dev *dev;
645 struct i40e_vsi *vsi;
646 struct i40e_vsi_context ctxt;
649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
651 if (vlan_id > ETHER_MAX_VLAN_ID) {
652 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
656 dev = &rte_eth_devices[port];
658 if (!is_i40e_supported(dev))
661 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
662 hw = I40E_PF_TO_HW(pf);
665 * return -ENODEV if SRIOV not enabled, VF number not configured
666 * or no queue assigned.
668 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
672 if (vf_id >= pf->vf_num || !pf->vfs) {
673 PMD_DRV_LOG(ERR, "Invalid VF ID.");
677 vsi = pf->vfs[vf_id].vsi;
679 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Update only the VLAN section; pvid carries the port VLAN id. */
683 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
684 vsi->info.pvid = vlan_id;
686 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
688 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
690 memset(&ctxt, 0, sizeof(ctxt));
691 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
692 ctxt.seid = vsi->seid;
694 hw = I40E_VSI_TO_HW(vsi);
695 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
696 if (ret != I40E_SUCCESS) {
698 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Enable/disable broadcast traffic for a VF by adding or deleting the
 * all-ones MAC filter on its VSI.  I40E_ERR_PARAM from the filter call
 * is tolerated (presumably filter-already-exists / not-found — confirm
 * against i40e_vsi_add_mac/i40e_vsi_delete_mac).
 */
704 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
707 struct rte_eth_dev *dev;
709 struct i40e_vsi *vsi;
711 struct i40e_mac_filter_info filter;
712 struct ether_addr broadcast = {
713 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
719 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
723 dev = &rte_eth_devices[port];
725 if (!is_i40e_supported(dev))
728 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
729 hw = I40E_PF_TO_HW(pf);
731 if (vf_id >= pf->vf_num || !pf->vfs) {
732 PMD_DRV_LOG(ERR, "Invalid VF ID.");
737 * return -ENODEV if SRIOV not enabled, VF number not configured
738 * or no queue assigned.
740 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
741 pf->vf_nb_qps == 0) {
742 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
746 vsi = pf->vfs[vf_id].vsi;
748 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Build a perfect-match filter for the broadcast MAC. */
753 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
754 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
755 ret = i40e_vsi_add_mac(vsi, &filter);
757 ret = i40e_vsi_delete_mac(vsi, &broadcast);
760 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
762 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
/*
 * Select whether a VF's VSI operates in VLAN "tagged" or "untagged"
 * port-VLAN mode: exactly one of the two PVLAN mode flags is set and
 * the other cleared, then the VLAN section is pushed to firmware.
 */
770 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
772 struct rte_eth_dev *dev;
775 struct i40e_vsi *vsi;
776 struct i40e_vsi_context ctxt;
779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
782 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
786 dev = &rte_eth_devices[port];
788 if (!is_i40e_supported(dev))
791 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
792 hw = I40E_PF_TO_HW(pf);
795 * return -ENODEV if SRIOV not enabled, VF number not configured
796 * or no queue assigned.
798 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
799 pf->vf_nb_qps == 0) {
800 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
804 if (vf_id >= pf->vf_num || !pf->vfs) {
805 PMD_DRV_LOG(ERR, "Invalid VF ID.");
809 vsi = pf->vfs[vf_id].vsi;
811 PMD_DRV_LOG(ERR, "Invalid VSI.");
815 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
/* on != 0: tagged mode; otherwise untagged mode (mutually exclusive). */
817 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
818 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
820 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
821 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
824 memset(&ctxt, 0, sizeof(ctxt));
825 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
826 ctxt.seid = vsi->seid;
828 hw = I40E_VSI_TO_HW(vsi);
829 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
830 if (ret != I40E_SUCCESS) {
832 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Count the VLAN ids currently set in the VSI's VFTA bitmap by scanning
 * every bit of every 32-bit word.
 */
839 i40e_vlan_filter_count(struct i40e_vsi *vsi)
845 for (j = 0; j < I40E_VFTA_SIZE; j++) {
849 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
850 if (!(vsi->vfta[j] & (1 << k)))
853 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
/*
 * Add/remove a VLAN filter for every VF selected in the 64-bit vf_mask.
 * When a VF's first filter is added, VLAN filtering is switched on and
 * VLAN promiscuous handling is adjusted; when the last filter is
 * removed, filtering is switched back off.  Stops at the first failure.
 */
864 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
865 uint64_t vf_mask, uint8_t on)
867 struct rte_eth_dev *dev;
870 struct i40e_vsi *vsi;
872 int ret = I40E_SUCCESS;
874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
876 dev = &rte_eth_devices[port];
878 if (!is_i40e_supported(dev))
/* VLAN 0 is not a valid filter target here. */
881 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
882 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
887 PMD_DRV_LOG(ERR, "No VF.");
892 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
896 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
897 hw = I40E_PF_TO_HW(pf);
900 * return -ENODEV if SRIOV not enabled, VF number not configured
901 * or no queue assigned.
903 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
904 pf->vf_nb_qps == 0) {
905 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
/* Loop exits early (ret != SUCCESS) if any VF update fails. */
909 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
910 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
911 vsi = pf->vfs[vf_idx].vsi;
/* First filter on this VF: turn filtering on. */
913 if (!vsi->vlan_filter_on) {
914 vsi->vlan_filter_on = true;
915 i40e_aq_set_vsi_vlan_promisc(hw,
/* Anti-spoof already programmed the VFTA; avoid doing it twice. */
919 if (!vsi->vlan_anti_spoof_on)
920 i40e_add_rm_all_vlan_filter(
923 ret = i40e_vsi_add_vlan(vsi, vlan_id);
925 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
/* Last filter removed: turn filtering off again. */
927 if (!i40e_vlan_filter_count(vsi)) {
928 vsi->vlan_filter_on = false;
929 i40e_aq_set_vsi_vlan_promisc(hw,
938 if (ret != I40E_SUCCESS) {
940 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
/*
 * Fill an rte_eth_stats structure for one VF from its VSI ethernet
 * statistics.  Unicast/multicast/broadcast counters are aggregated into
 * ipackets/opackets; rx_discards are reported as ierrors.
 */
947 rte_pmd_i40e_get_vf_stats(uint16_t port,
949 struct rte_eth_stats *stats)
951 struct rte_eth_dev *dev;
953 struct i40e_vsi *vsi;
955 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
957 dev = &rte_eth_devices[port];
959 if (!is_i40e_supported(dev))
962 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
964 if (vf_id >= pf->vf_num || !pf->vfs) {
965 PMD_DRV_LOG(ERR, "Invalid VF ID.");
969 vsi = pf->vfs[vf_id].vsi;
971 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Refresh the cached VSI counters before reading them. */
975 i40e_update_vsi_stats(vsi);
977 stats->ipackets = vsi->eth_stats.rx_unicast +
978 vsi->eth_stats.rx_multicast +
979 vsi->eth_stats.rx_broadcast;
980 stats->opackets = vsi->eth_stats.tx_unicast +
981 vsi->eth_stats.tx_multicast +
982 vsi->eth_stats.tx_broadcast;
983 stats->ibytes = vsi->eth_stats.rx_bytes;
984 stats->obytes = vsi->eth_stats.tx_bytes;
985 stats->ierrors = vsi->eth_stats.rx_discards;
986 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
/*
 * Reset a VF's statistics: clearing offset_loaded makes the next
 * i40e_update_vsi_stats() call re-baseline the hardware counters.
 */
992 rte_pmd_i40e_reset_vf_stats(uint16_t port,
995 struct rte_eth_dev *dev;
997 struct i40e_vsi *vsi;
999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1001 dev = &rte_eth_devices[port];
1003 if (!is_i40e_supported(dev))
1006 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1008 if (vf_id >= pf->vf_num || !pf->vfs) {
1009 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1013 vsi = pf->vfs[vf_id].vsi;
1015 PMD_DRV_LOG(ERR, "Invalid VSI.");
1019 vsi->offset_loaded = false;
1020 i40e_update_vsi_stats(vsi);
/*
 * Set the aggregate max bandwidth (Mbps) for a VF's VSI.  bw must be a
 * multiple of I40E_QOS_BW_GRANULARITY and no larger than
 * I40E_QOS_BW_MAX; bw == 0 disables the limit.  Rejected if any per-TC
 * bandwidth limit is active, since the two modes are mutually exclusive.
 */
1026 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1028 struct rte_eth_dev *dev;
1030 struct i40e_vsi *vsi;
1035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1037 dev = &rte_eth_devices[port];
1039 if (!is_i40e_supported(dev))
1042 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1044 if (vf_id >= pf->vf_num || !pf->vfs) {
1045 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1049 vsi = pf->vfs[vf_id].vsi;
1051 PMD_DRV_LOG(ERR, "Invalid VSI.");
1055 if (bw > I40E_QOS_BW_MAX) {
1056 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1061 if (bw % I40E_QOS_BW_GRANULARITY) {
1062 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1063 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps into firmware bandwidth units. */
1067 bw /= I40E_QOS_BW_GRANULARITY;
1069 hw = I40E_VSI_TO_HW(vsi);
1072 if (bw == vsi->bw_info.bw_limit) {
1074 "No change for VF max bandwidth. Nothing to do.");
1079 * VF bandwidth limitation and TC bandwidth limitation cannot be
1080 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1082 * If bw is 0, means disable bandwidth limitation. Then no need to
1083 * check TC bandwidth limitation.
1086 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1087 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1088 vsi->bw_info.bw_ets_credits[i])
/* Loop ran to completion iff no TC limit was found. */
1091 if (i != I40E_MAX_TRAFFIC_CLASS) {
1093 "TC max bandwidth has been set on this VF,"
1094 " please disable it first.");
1099 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1102 "Failed to set VF %d bandwidth, err(%d).",
1107 /* Store the configuration. */
1108 vsi->bw_info.bw_limit = (uint16_t)bw;
1109 vsi->bw_info.bw_max = 0;
/*
 * Set the relative bandwidth weights of a VF's enabled TCs.  bw_weight
 * must supply one non-zero entry per enabled TC and the entries must
 * sum to 100.  The full per-TC table is rebuilt and only sent to
 * firmware when at least one weight actually changed (b_change).
 */
1115 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1116 uint8_t tc_num, uint8_t *bw_weight)
1118 struct rte_eth_dev *dev;
1120 struct i40e_vsi *vsi;
1122 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1126 bool b_change = false;
1128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1130 dev = &rte_eth_devices[port];
1132 if (!is_i40e_supported(dev))
1135 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1137 if (vf_id >= pf->vf_num || !pf->vfs) {
1138 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1142 vsi = pf->vfs[vf_id].vsi;
1144 PMD_DRV_LOG(ERR, "Invalid VSI.");
1148 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1149 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1150 I40E_MAX_TRAFFIC_CLASS);
/* Count enabled TCs; caller must supply exactly that many weights. */
1155 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1156 if (vsi->enabled_tc & BIT_ULL(i))
1159 if (sum != tc_num) {
1161 "Weight should be set for all %d enabled TCs.",
/* Weights must be >= 1 each and total exactly 100. */
1167 for (i = 0; i < tc_num; i++) {
1168 if (!bw_weight[i]) {
1170 "The weight should be 1 at least.");
1173 sum += bw_weight[i];
1177 "The summary of the TC weight should be 100.");
1182 * Create the configuration for all the TCs.
1184 memset(&tc_bw, 0, sizeof(tc_bw));
1185 tc_bw.tc_valid_bits = vsi->enabled_tc;
1187 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1188 if (vsi->enabled_tc & BIT_ULL(i)) {
/* Compare against the stored credits to detect a real change. */
1190 vsi->bw_info.bw_ets_share_credits[i])
1193 tc_bw.tc_bw_credits[i] = bw_weight[j];
1201 "No change for TC allocated bandwidth."
1206 hw = I40E_VSI_TO_HW(vsi);
1208 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1211 "Failed to set VF %d TC bandwidth weight, err(%d).",
1216 /* Store the configuration. */
1218 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1219 if (vsi->enabled_tc & BIT_ULL(i)) {
1220 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
/*
 * Set the max bandwidth (Mbps) of one traffic class on a VF's VSI.
 * Validates range/granularity and that the TC is enabled, disables any
 * active VF-level bandwidth limit first (the two are mutually
 * exclusive), then rebuilds and sends the full per-TC ETS SLA table —
 * incremental updates are not permitted by firmware.
 */
1229 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1230 uint8_t tc_no, uint32_t bw)
1232 struct rte_eth_dev *dev;
1234 struct i40e_vsi *vsi;
1236 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1240 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1242 dev = &rte_eth_devices[port];
1244 if (!is_i40e_supported(dev))
1247 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1249 if (vf_id >= pf->vf_num || !pf->vfs) {
1250 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1254 vsi = pf->vfs[vf_id].vsi;
1256 PMD_DRV_LOG(ERR, "Invalid VSI.");
1260 if (bw > I40E_QOS_BW_MAX) {
1261 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1266 if (bw % I40E_QOS_BW_GRANULARITY) {
1267 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1268 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps into firmware bandwidth units. */
1272 bw /= I40E_QOS_BW_GRANULARITY;
1274 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1275 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1276 I40E_MAX_TRAFFIC_CLASS);
1280 hw = I40E_VSI_TO_HW(vsi);
1282 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1283 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1289 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1291 "No change for TC max bandwidth. Nothing to do.");
1296 * VF bandwidth limitation and TC bandwidth limitation cannot be
1297 * enabled in parallel, disable VF bandwidth limitation if it's
1299 * If bw is 0, means disable bandwidth limitation. Then no need to
1300 * care about VF bandwidth limitation configuration.
1302 if (bw && vsi->bw_info.bw_limit) {
/* Limit 0 disables the VF-level bandwidth cap. */
1303 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1306 "Failed to disable VF(%d)"
1307 " bandwidth limitation, err(%d).",
1313 "VF max bandwidth is disabled according"
1314 " to TC max bandwidth setting.");
1318 * Get all the TCs' info to create a whole picture.
1319 * Because the incremental change isn't permitted.
1321 memset(&tc_bw, 0, sizeof(tc_bw));
1322 tc_bw.tc_valid_bits = vsi->enabled_tc;
1323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1324 if (vsi->enabled_tc & BIT_ULL(i)) {
1325 tc_bw.tc_bw_credits[i] =
1327 vsi->bw_info.bw_ets_credits[i]);
/* Overwrite the target TC with the new (little-endian) credit value. */
1330 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1332 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1335 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1340 /* Store the configuration. */
1341 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
/*
 * Set strict-priority scheduling for the TCs in tc_map on the port's
 * VEB.  tc_map must be a subset of the enabled TCs.  DCBx (LLDP) is
 * stopped the first time strict priority is enabled and restarted when
 * the map is cleared; the ETS switching-component configuration is
 * enabled/modified/disabled via the appropriate AQ opcode.
 */
1347 rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
1349 struct rte_eth_dev *dev;
1351 struct i40e_vsi *vsi;
1352 struct i40e_veb *veb;
1354 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1358 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1360 dev = &rte_eth_devices[port];
1362 if (!is_i40e_supported(dev))
1365 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1369 PMD_DRV_LOG(ERR, "Invalid VSI.");
1375 PMD_DRV_LOG(ERR, "Invalid VEB.");
/* Requested map must not select a TC that isn't enabled on the VEB. */
1379 if ((tc_map & veb->enabled_tc) != tc_map) {
1381 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1386 if (tc_map == veb->strict_prio_tc) {
1387 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1391 hw = I40E_VSI_TO_HW(vsi);
1393 /* Disable DCBx if it's the first time to set strict priority. */
1394 if (!veb->strict_prio_tc) {
1395 ret = i40e_aq_stop_lldp(hw, true, NULL);
1398 "Failed to disable DCBx as it's already"
1402 "DCBx is disabled according to strict"
1403 " priority setting.");
1406 memset(&ets_data, 0, sizeof(ets_data));
1407 ets_data.tc_valid_bits = veb->enabled_tc;
1408 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1409 ets_data.tc_strict_priority_flags = tc_map;
1410 /* Get all TCs' bandwidth. */
1411 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1412 if (veb->enabled_tc & BIT_ULL(i)) {
1413 /* For rubust, if bandwidth is 0, use 1 instead. */
1414 if (veb->bw_info.bw_ets_share_credits[i])
1415 ets_data.tc_bw_share_credits[i] =
1416 veb->bw_info.bw_ets_share_credits[i];
1418 ets_data.tc_bw_share_credits[i] =
1419 I40E_QOS_BW_WEIGHT_MIN;
/* First enable, later modify; clearing the map disables ETS. */
1423 if (!veb->strict_prio_tc)
1424 ret = i40e_aq_config_switch_comp_ets(
1425 hw, veb->uplink_seid,
1426 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1429 ret = i40e_aq_config_switch_comp_ets(
1430 hw, veb->uplink_seid,
1431 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1434 ret = i40e_aq_config_switch_comp_ets(
1435 hw, veb->uplink_seid,
1436 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1441 "Failed to set TCs' strict priority mode."
1446 veb->strict_prio_tc = tc_map;
1448 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1450 ret = i40e_aq_start_lldp(hw, NULL);
1453 "Failed to enable DCBx, err(%d).", ret);
1458 "DCBx is enabled again according to strict"
1459 " priority setting.");
/* Size of one DDP profile-info record and the max records we list. */
1465 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1466 #define I40E_MAX_PROFILE_NUM 16
/*
 * Build a DDP "profile info" section in profile_info_sec: fill the
 * section header (type/offset/size) and the trailing i40e_profile_info
 * record with name, version, track_id, and an ADD or REMOVE op code.
 * The caller provides a buffer large enough for header + info.
 */
1469 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1470 uint32_t track_id, uint8_t *profile_info_sec,
1473 struct i40e_profile_section_header *sec = NULL;
1474 struct i40e_profile_info *pinfo;
1476 sec = (struct i40e_profile_section_header *)profile_info_sec;
1478 sec->data_end = sizeof(struct i40e_profile_section_header) +
1479 sizeof(struct i40e_profile_info);
1480 sec->section.type = SECTION_TYPE_INFO;
1481 sec->section.offset = sizeof(struct i40e_profile_section_header);
1482 sec->section.size = sizeof(struct i40e_profile_info);
/* The info record lives immediately after the section header. */
1483 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1484 sec->section.offset);
1485 pinfo->track_id = track_id;
1486 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1487 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1489 pinfo->op = I40E_DDP_ADD_TRACKID;
1491 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1494 static enum i40e_status_code
/*
 * Write a previously built profile-info section (see
 * i40e_generate_profile_info_sec) to the device via the DDP write AQ
 * command, registering or de-registering the profile's track_id.
 */
1495 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1497 enum i40e_status_code status = I40E_SUCCESS;
1498 struct i40e_profile_section_header *sec;
1500 uint32_t offset = 0;
1503 sec = (struct i40e_profile_section_header *)profile_info_sec;
/* Pull the track_id back out of the embedded info record. */
1504 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1505 sec->section.offset))->track_id;
1507 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1508 track_id, &offset, &info, NULL);
1510 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1511 "offset %d, info %d",
1517 /* Check if the profile info exists */
/*
 * Query the device's loaded-profile list and classify the candidate
 * profile in profile_info_sec against it: same track_id already loaded,
 * a group-0 profile present, or a profile from a different group
 * present.  NOTE(review): the distinct return codes for each case are
 * on elided lines — confirm their values against the full source.
 */
1519 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1521 struct rte_eth_dev *dev = &rte_eth_devices[port];
1522 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524 struct rte_pmd_i40e_profile_list *p_list;
1525 struct rte_pmd_i40e_profile_info *pinfo, *p;
/* Bits 23:16 of track_id encode the profile group. */
1528 static const uint32_t group_mask = 0x00ff0000;
1530 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1531 sizeof(struct i40e_profile_section_header));
/* track_id 0 marks a read-only profile: nothing to check. */
1532 if (pinfo->track_id == 0) {
1533 PMD_DRV_LOG(INFO, "Read-only profile.");
1536 buff = rte_zmalloc("pinfo_list",
1537 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1540 PMD_DRV_LOG(ERR, "failed to allocate memory");
1544 ret = i40e_aq_get_ddp_list(
1546 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1549 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1553 p_list = (struct rte_pmd_i40e_profile_list *)buff;
/* Pass 1: exact track_id match — profile already loaded. */
1554 for (i = 0; i < p_list->p_count; i++) {
1555 p = &p_list->p_info[i];
1556 if (pinfo->track_id == p->track_id) {
1557 PMD_DRV_LOG(INFO, "Profile exists.");
/* Pass 2: any loaded profile from group 0. */
1562 for (i = 0; i < p_list->p_count; i++) {
1563 p = &p_list->p_info[i];
1564 if ((p->track_id & group_mask) == 0) {
1565 PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
/* Pass 3: any loaded profile from a different group. */
1570 for (i = 0; i < p_list->p_count; i++) {
1571 p = &p_list->p_info[i];
1572 if ((pinfo->track_id & group_mask) !=
1573 (p->track_id & group_mask)) {
1574 PMD_DRV_LOG(INFO, "Profile of different group exists.");
/*
 * Write/delete a DDP (Dynamic Device Personalization) package to/from the
 * adapter, per the requested op (WR_ADD, WR_ONLY or WR_DEL).
 * Validates the package header, locates the metadata and i40e profile
 * segments, checks whether an equal/conflicting profile is already loaded,
 * then writes (or rolls back) the profile via the admin queue and updates
 * the loaded-profiles info list.
 *
 * NOTE(review): this capture is missing interior lines (braces, returns and
 * some statements were elided by extraction) — code left byte-identical.
 */
1585 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1587 enum rte_pmd_i40e_package_op op)
1589 struct rte_eth_dev *dev;
1591 struct i40e_package_header *pkg_hdr;
1592 struct i40e_generic_seg_header *profile_seg_hdr;
1593 struct i40e_generic_seg_header *metadata_seg_hdr;
1595 uint8_t *profile_info_sec;
1597 enum i40e_status_code status = I40E_SUCCESS;
/* high byte of track_id encodes the profile group/type */
1598 static const uint32_t type_mask = 0xff000000;
/* only the three write-style operations are handled here */
1600 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1601 op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1602 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1603 PMD_DRV_LOG(ERR, "Operation not supported.");
1607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1609 dev = &rte_eth_devices[port];
1611 if (!is_i40e_supported(dev))
1614 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* minimum plausible package: header + metadata segment + 2 offsets */
1616 if (size < (sizeof(struct i40e_package_header) +
1617 sizeof(struct i40e_metadata_segment) +
1618 sizeof(uint32_t) * 2)) {
1619 PMD_DRV_LOG(ERR, "Buff is invalid.");
1623 pkg_hdr = (struct i40e_package_header *)buff;
1626 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
/* a valid package carries at least metadata + profile segments */
1630 if (pkg_hdr->segment_count < 2) {
1631 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1635 /* Find metadata segment */
1636 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1638 if (!metadata_seg_hdr) {
1639 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1642 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1643 if (track_id == I40E_DDP_TRACKID_INVALID) {
1644 PMD_DRV_LOG(ERR, "Invalid track_id");
1648 /* force read-only track_id for type 0 */
1649 if ((track_id & type_mask) == 0)
1652 /* Find profile segment */
1653 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1655 if (!profile_seg_hdr) {
1656 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
/* scratch section used both for the exists-check and the info-list update */
1660 profile_info_sec = rte_zmalloc(
1661 "i40e_profile_info",
1662 sizeof(struct i40e_profile_section_header) +
1663 sizeof(struct i40e_profile_info),
1665 if (!profile_info_sec) {
1666 PMD_DRV_LOG(ERR, "Failed to allocate memory")
1747 /* Get number of tlv records in the section */
/*
 * Walk a profile section's TLV records and count them.
 * NOTE(review): loop body and return are missing from this capture
 * (extraction gap); code left byte-identical.
 */
1749 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1751 unsigned int i, nb_rec, nb_tlv = 0;
1752 struct i40e_profile_tlv_section_record *tlv;
1757 /* get number of records in the section */
1758 nb_rec = sec->section.size /
1759 sizeof(struct i40e_profile_tlv_section_record);
/* i advances by each TLV's record length inside the loop, not by 1 */
1760 for (i = 0; i < nb_rec; ) {
1761 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
/*
 * Query information about a DDP package image held in pkg_buff without
 * loading it: one dispatch branch per rte_pmd_i40e_package_info type,
 * each validating info_size before writing the result into info_buff.
 *
 * NOTE(review): this capture is missing interior lines (braces, returns,
 * some declarations) — code left byte-identical.
 */
1768 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1769 uint8_t *info_buff, uint32_t info_size,
1770 enum rte_pmd_i40e_package_info type)
1773 struct i40e_package_header *pkg_hdr;
1774 struct i40e_generic_seg_header *i40e_seg_hdr;
1775 struct i40e_generic_seg_header *note_seg_hdr;
1776 struct i40e_generic_seg_header *metadata_seg_hdr;
1779 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
/* same minimum-size rule as rte_pmd_i40e_process_ddp_package() */
1783 if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1784 sizeof(struct i40e_metadata_segment) +
1785 sizeof(uint32_t) * 2)) {
1786 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1790 pkg_hdr = (struct i40e_package_header *)pkg_buff;
1791 if (pkg_hdr->segment_count < 2) {
1792 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1796 /* Find metadata segment */
1797 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1800 /* Find global notes segment */
1801 note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1804 /* Find i40e profile segment */
1805 i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1807 /* get global header info */
1808 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1809 struct rte_pmd_i40e_profile_info *info =
1810 (struct rte_pmd_i40e_profile_info *)info_buff;
1812 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1813 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1817 if (!metadata_seg_hdr) {
1818 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1822 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1823 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1825 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1828 ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1829 I40E_DDP_NAME_SIZE);
1830 memcpy(&info->version,
1831 &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1832 sizeof(struct i40e_ddp_version));
1833 return I40E_SUCCESS;
1836 /* get global note size */
1837 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1838 if (info_size < sizeof(uint32_t)) {
1839 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1842 if (note_seg_hdr == NULL)
1845 ret_size = note_seg_hdr->size;
1846 *(uint32_t *)info_buff = ret_size;
1847 return I40E_SUCCESS;
1850 /* get global note */
1851 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1852 if (note_seg_hdr == NULL)
1854 if (info_size < note_seg_hdr->size) {
1855 PMD_DRV_LOG(ERR, "Information buffer size is too small");
/* NOTE(review): '¬e_seg_hdr' below is mojibake — the source encoding
 * mangled '&note_seg_hdr[1]' (notes payload follows the header). Fix the
 * encoding damage in the canonical source. */
1858 memcpy(info_buff, ¬e_seg_hdr[1], note_seg_hdr->size);
1859 return I40E_SUCCESS;
1862 /* get i40e segment header info */
1863 if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1864 struct rte_pmd_i40e_profile_info *info =
1865 (struct rte_pmd_i40e_profile_info *)info_buff;
1867 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1868 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1872 if (!metadata_seg_hdr) {
1873 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1877 if (!i40e_seg_hdr) {
1878 PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1882 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1883 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1885 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1888 ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1889 I40E_DDP_NAME_SIZE);
1890 memcpy(&info->version,
1891 &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1892 sizeof(struct i40e_ddp_version));
1893 return I40E_SUCCESS;
1896 /* get number of devices */
1897 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1898 if (info_size < sizeof(uint32_t)) {
1899 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1902 *(uint32_t *)info_buff =
1903 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1904 return I40E_SUCCESS;
1907 /* get list of devices */
1908 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1911 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1912 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1913 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1917 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1918 sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1919 return I40E_SUCCESS;
1922 /* get number of protocols */
1923 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1924 struct i40e_profile_section_header *proto;
1926 if (info_size < sizeof(uint32_t)) {
1927 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1930 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1931 (struct i40e_profile_segment *)i40e_seg_hdr);
1932 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1933 return I40E_SUCCESS;
1936 /* get list of protocols */
1937 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1938 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1939 struct rte_pmd_i40e_proto_info *pinfo;
1940 struct i40e_profile_section_header *proto;
1941 struct i40e_profile_tlv_section_record *tlv;
/* pre-mark every output slot unused before filling from the TLVs */
1943 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1944 nb_proto_info = info_size /
1945 sizeof(struct rte_pmd_i40e_proto_info);
1946 for (i = 0; i < nb_proto_info; i++) {
1947 pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1948 memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1950 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1951 (struct i40e_profile_segment *)i40e_seg_hdr);
1952 nb_tlv = i40e_get_tlv_section_size(proto);
1954 return I40E_SUCCESS;
1955 if (nb_proto_info < nb_tlv) {
1956 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1959 /* get number of records in the section */
1960 nb_rec = proto->section.size /
1961 sizeof(struct i40e_profile_tlv_section_record);
1962 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1963 for (i = j = 0; i < nb_rec; j++) {
/* data[0] = protocol id, data[1..] = NUL-terminated name */
1964 pinfo[j].proto_id = tlv->data[0];
1965 snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1966 (const char *)&tlv->data[1]);
1968 tlv = &tlv[tlv->len];
1970 return I40E_SUCCESS;
1973 /* get number of packet classification types */
1974 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1975 struct i40e_profile_section_header *pctype;
1977 if (info_size < sizeof(uint32_t)) {
1978 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1981 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1982 (struct i40e_profile_segment *)i40e_seg_hdr);
1983 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1984 return I40E_SUCCESS;
1987 /* get list of packet classification types */
1988 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1989 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1990 struct rte_pmd_i40e_ptype_info *pinfo;
1991 struct i40e_profile_section_header *pctype;
1992 struct i40e_profile_tlv_section_record *tlv;
1994 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1995 nb_proto_info = info_size /
1996 sizeof(struct rte_pmd_i40e_ptype_info);
1997 for (i = 0; i < nb_proto_info; i++)
1998 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1999 sizeof(struct rte_pmd_i40e_ptype_info));
2000 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2001 (struct i40e_profile_segment *)i40e_seg_hdr);
2002 nb_tlv = i40e_get_tlv_section_size(pctype);
2004 return I40E_SUCCESS;
2005 if (nb_proto_info < nb_tlv) {
2006 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2010 /* get number of records in the section */
2011 nb_rec = pctype->section.size /
2012 sizeof(struct i40e_profile_tlv_section_record);
2013 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2014 for (i = j = 0; i < nb_rec; j++) {
2015 memcpy(&pinfo[j], tlv->data,
2016 sizeof(struct rte_pmd_i40e_ptype_info));
2018 tlv = &tlv[tlv->len];
2020 return I40E_SUCCESS;
2023 /* get number of packet types */
2024 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2025 struct i40e_profile_section_header *ptype;
2027 if (info_size < sizeof(uint32_t)) {
2028 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2031 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2032 (struct i40e_profile_segment *)i40e_seg_hdr);
2033 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2034 return I40E_SUCCESS;
2037 /* get list of packet types */
2038 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2039 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2040 struct rte_pmd_i40e_ptype_info *pinfo;
2041 struct i40e_profile_section_header *ptype;
2042 struct i40e_profile_tlv_section_record *tlv;
2044 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2045 nb_proto_info = info_size /
2046 sizeof(struct rte_pmd_i40e_ptype_info);
2047 for (i = 0; i < nb_proto_info; i++)
2048 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2049 sizeof(struct rte_pmd_i40e_ptype_info));
2050 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2051 (struct i40e_profile_segment *)i40e_seg_hdr);
2052 nb_tlv = i40e_get_tlv_section_size(ptype);
2054 return I40E_SUCCESS;
2055 if (nb_proto_info < nb_tlv) {
2056 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2059 /* get number of records in the section */
2060 nb_rec = ptype->section.size /
2061 sizeof(struct i40e_profile_tlv_section_record);
2062 for (i = j = 0; i < nb_rec; j++) {
2063 tlv = (struct i40e_profile_tlv_section_record *)
2065 memcpy(&pinfo[j], tlv->data,
2066 sizeof(struct rte_pmd_i40e_ptype_info));
2069 return I40E_SUCCESS;
/* fall-through: no branch matched the requested info type */
2072 PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
/*
 * Retrieve the list of profiles currently loaded on the adapter via the
 * admin queue into caller-supplied buff. buff must hold at least
 * I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4 bytes.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2077 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2079 struct rte_eth_dev *dev;
2081 enum i40e_status_code status = I40E_SUCCESS;
2083 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2085 dev = &rte_eth_devices[port];
2087 if (!is_i40e_supported(dev))
2090 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2093 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2095 status = i40e_aq_get_ddp_list(hw, (void *)buff,
2101 static int check_invalid_pkt_type(uint32_t pkt_type)
2103 uint32_t l2, l3, l4, tnl, il2, il3, il4;
2105 l2 = pkt_type & RTE_PTYPE_L2_MASK;
2106 l3 = pkt_type & RTE_PTYPE_L3_MASK;
2107 l4 = pkt_type & RTE_PTYPE_L4_MASK;
2108 tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2109 il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2110 il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2111 il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2114 l2 != RTE_PTYPE_L2_ETHER &&
2115 l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2116 l2 != RTE_PTYPE_L2_ETHER_ARP &&
2117 l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2118 l2 != RTE_PTYPE_L2_ETHER_NSH &&
2119 l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2120 l2 != RTE_PTYPE_L2_ETHER_QINQ)
2124 l3 != RTE_PTYPE_L3_IPV4 &&
2125 l3 != RTE_PTYPE_L3_IPV4_EXT &&
2126 l3 != RTE_PTYPE_L3_IPV6 &&
2127 l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2128 l3 != RTE_PTYPE_L3_IPV6_EXT &&
2129 l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2133 l4 != RTE_PTYPE_L4_TCP &&
2134 l4 != RTE_PTYPE_L4_UDP &&
2135 l4 != RTE_PTYPE_L4_FRAG &&
2136 l4 != RTE_PTYPE_L4_SCTP &&
2137 l4 != RTE_PTYPE_L4_ICMP &&
2138 l4 != RTE_PTYPE_L4_NONFRAG)
2142 tnl != RTE_PTYPE_TUNNEL_IP &&
2143 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2144 tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2145 tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2146 tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2147 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2148 tnl != RTE_PTYPE_TUNNEL_GTPC &&
2149 tnl != RTE_PTYPE_TUNNEL_GTPU)
2153 il2 != RTE_PTYPE_INNER_L2_ETHER &&
2154 il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2155 il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2159 il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2160 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2161 il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2162 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2163 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2164 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2168 il4 != RTE_PTYPE_INNER_L4_TCP &&
2169 il4 != RTE_PTYPE_INNER_L4_UDP &&
2170 il4 != RTE_PTYPE_INNER_L4_FRAG &&
2171 il4 != RTE_PTYPE_INNER_L4_SCTP &&
2172 il4 != RTE_PTYPE_INNER_L4_ICMP &&
2173 il4 != RTE_PTYPE_INNER_L4_NONFRAG)
/*
 * Validate an array of HW->SW ptype mapping entries: hw_ptype must be in
 * table range, sw_ptype must not be UNKNOWN, user-defined ptypes are
 * accepted as-is, anything else must pass check_invalid_pkt_type().
 * NOTE(review): returns/braces elided by extraction; code byte-identical.
 */
2179 static int check_invalid_ptype_mapping(
2180 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2185 for (i = 0; i < count; i++) {
2186 uint16_t ptype = mapping_table[i].hw_ptype;
2187 uint32_t pkt_type = mapping_table[i].sw_ptype;
2189 if (ptype >= I40E_MAX_PKT_TYPE)
2192 if (pkt_type == RTE_PTYPE_UNKNOWN)
/* user-defined ptypes bypass the per-layer validation */
2195 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2198 if (check_invalid_pkt_type(pkt_type))
/*
 * Install caller-supplied HW->SW ptype mapping entries into the per-port
 * translation table; in exclusive mode the whole table is first reset to
 * RTE_PTYPE_UNKNOWN. Entries are validated before any table write.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2206 rte_pmd_i40e_ptype_mapping_update(
2208 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2212 struct rte_eth_dev *dev;
2213 struct i40e_adapter *ad;
2216 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2218 dev = &rte_eth_devices[port];
2220 if (!is_i40e_supported(dev))
2223 if (count > I40E_MAX_PKT_TYPE)
2226 if (check_invalid_ptype_mapping(mapping_items, count))
2229 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* exclusive: wipe the table before applying the new entries */
2232 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2233 ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2236 for (i = 0; i < count; i++)
2237 ad->ptype_tbl[mapping_items[i].hw_ptype]
2238 = mapping_items[i].sw_ptype;
2243 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2245 struct rte_eth_dev *dev;
2247 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2249 dev = &rte_eth_devices[port];
2251 if (!is_i40e_supported(dev))
2254 i40e_set_default_ptype_table(dev);
/*
 * Copy the current HW->SW ptype translation table into mapping_items;
 * when valid_only is set, entries mapped to RTE_PTYPE_UNKNOWN are skipped.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2259 int rte_pmd_i40e_ptype_mapping_get(
2261 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2266 struct rte_eth_dev *dev;
2267 struct i40e_adapter *ad;
2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2273 dev = &rte_eth_devices[port];
2275 if (!is_i40e_supported(dev))
2278 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2280 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2283 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
/* n counts entries actually written to the output array */
2285 mapping_items[n].hw_ptype = i;
2286 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
/*
 * Replace table entries matching 'target' with pkt_type. With mask set,
 * any entry whose bits are a non-empty subset of target is replaced;
 * without mask, only exact matches are replaced.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2294 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2299 struct rte_eth_dev *dev;
2300 struct i40e_adapter *ad;
2303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2305 dev = &rte_eth_devices[port];
2307 if (!is_i40e_supported(dev))
/* exact-match mode requires target itself to be a valid ptype */
2310 if (!mask && check_invalid_pkt_type(target))
2313 if (check_invalid_pkt_type(pkt_type))
2316 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2318 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
/* masked: entry bits all inside target, and overlap is non-empty */
2320 if ((target | ad->ptype_tbl[i]) == target &&
2321 (target & ad->ptype_tbl[i]))
2322 ad->ptype_tbl[i] = pkt_type;
2324 if (ad->ptype_tbl[i] == target)
2325 ad->ptype_tbl[i] = pkt_type;
/*
 * Add a perfect-match MAC filter for the given VF's VSI on behalf of the
 * PF. Validates the MAC, the port, and the VF index before touching HW.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2333 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2334 struct ether_addr *mac_addr)
2336 struct rte_eth_dev *dev;
2337 struct i40e_pf_vf *vf;
2338 struct i40e_vsi *vsi;
2340 struct i40e_mac_filter_info mac_filter;
2343 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2346 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2348 dev = &rte_eth_devices[port];
2350 if (!is_i40e_supported(dev))
2353 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2355 if (vf_id >= pf->vf_num || !pf->vfs)
2358 vf = &pf->vfs[vf_id];
2361 PMD_DRV_LOG(ERR, "Invalid VSI.");
2365 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2366 ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2367 ret = i40e_vsi_add_mac(vsi, &mac_filter);
2368 if (ret != I40E_SUCCESS) {
2369 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2376 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2378 struct rte_eth_dev *dev;
2380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2382 dev = &rte_eth_devices[port];
2384 if (!is_i40e_supported(dev))
2387 i40e_set_default_pctype_table(dev);
/*
 * Dump the whole flow type -> PCTYPE bitmap table into mapping_items,
 * one entry per flow type index (caller must size the array to
 * I40E_FLOW_TYPE_MAX).
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2392 int rte_pmd_i40e_flow_type_mapping_get(
2394 struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2396 struct rte_eth_dev *dev;
2397 struct i40e_adapter *ad;
2400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2402 dev = &rte_eth_devices[port];
2404 if (!is_i40e_supported(dev))
2407 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2409 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2410 mapping_items[i].flow_type = i;
2411 mapping_items[i].pctype = ad->pctypes_tbl[i];
/*
 * Install flow type -> PCTYPE bitmap entries; in exclusive mode the whole
 * table and the flow-type mask are cleared first. Each entry's flow type
 * must be in range and its pctype bitmap must not contain the invalid bit.
 * flow_types_mask / pctypes_mask are kept consistent with the table.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2418 rte_pmd_i40e_flow_type_mapping_update(
2420 struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2424 struct rte_eth_dev *dev;
2425 struct i40e_adapter *ad;
2428 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2430 dev = &rte_eth_devices[port];
2432 if (!is_i40e_supported(dev))
2435 if (count > I40E_FLOW_TYPE_MAX)
2438 for (i = 0; i < count; i++)
2439 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2440 mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2441 (mapping_items[i].pctype &
2442 (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2445 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* exclusive: clear the whole mapping and mask before applying */
2448 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2449 ad->pctypes_tbl[i] = 0ULL;
2450 ad->flow_types_mask = 0ULL;
2453 for (i = 0; i < count; i++) {
2454 ad->pctypes_tbl[mapping_items[i].flow_type] =
2455 mapping_items[i].pctype;
/* a zero pctype bitmap disables the flow type in the mask */
2456 if (mapping_items[i].pctype)
2457 ad->flow_types_mask |=
2458 (1ULL << mapping_items[i].flow_type);
2460 ad->flow_types_mask &=
2461 ~(1ULL << mapping_items[i].flow_type);
/* recompute the aggregate pctype mask from the updated table */
2464 for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2465 ad->pctypes_mask |= ad->pctypes_tbl[i];
/*
 * Linear-search the PF's VF array for the VF whose MAC equals vf_mac and
 * return its index (the match branch/returns are in elided lines).
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2471 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2473 struct rte_eth_dev *dev;
2474 struct ether_addr *mac;
2477 struct i40e_pf_vf *vf;
2480 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2481 dev = &rte_eth_devices[port];
2483 if (!is_i40e_supported(dev))
2486 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2487 vf_num = pf->vf_num;
2489 for (vf_id = 0; vf_id < vf_num; vf_id++) {
2490 vf = &pf->vfs[vf_id];
2491 mac = &vf->mac_addr;
2493 if (is_same_ether_addr(mac, vf_mac))
/*
 * Re-program the main VSI's TC/queue mapping so each configured queue
 * region becomes a TC: region_id -> TC index, queue_start_index ->
 * queue offset, log2(queue_num) -> queue count field. Pushes the updated
 * VSI context via admin queue, then mirrors it into the local VSI info.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2501 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2505 struct i40e_vsi *vsi = pf->main_vsi;
2506 uint16_t queue_offset, bsf, tc_index;
2507 struct i40e_vsi_context ctxt;
2508 struct i40e_aqc_vsi_properties_data *vsi_info;
2509 struct i40e_queue_regions *region_info =
2511 int32_t ret = -EINVAL;
2513 if (!region_info->queue_region_number) {
2514 PMD_INIT_LOG(ERR, "there is no that region id been set before");
2518 memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2520 /* Update Queue Pairs Mapping for currently enabled UPs */
2521 ctxt.seid = vsi->seid;
2522 ctxt.pf_num = hw->pf_id;
2524 ctxt.uplink_seid = vsi->uplink_seid;
2525 ctxt.info = vsi->info;
2526 vsi_info = &ctxt.info;
2528 memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2529 memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2531 /* Configure queue region and queue mapping parameters,
2532 * for enabled queue region, allocate queues to this region.
2535 for (i = 0; i < region_info->queue_region_number; i++) {
2536 tc_index = region_info->region[i].region_id;
/* queue_num is validated as a power of two, so bsf == log2(queue_num) */
2537 bsf = rte_bsf32(region_info->region[i].queue_num);
2538 queue_offset = region_info->region[i].queue_start_index;
2539 vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2540 (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2541 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2544 /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2545 vsi_info->mapping_flags |=
2546 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2547 vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2548 vsi_info->valid_sections |=
2549 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2551 /* Update the VSI after updating the VSI queue-mapping information */
2552 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2554 PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
2555 hw->aq.asq_last_status);
2558 /* update the local VSI info with updated queue map */
2559 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2560 sizeof(vsi->info.tc_mapping));
2561 rte_memcpy(&vsi->info.queue_mapping,
2562 &ctxt.info.queue_mapping,
2563 sizeof(vsi->info.queue_mapping));
2564 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2565 vsi->info.valid_sections = 0;
/*
 * Record a new queue region (id, size, start index) in the driver-side
 * queue_region store. Size must be a power of two <= 64, id <= 7, and the
 * queue range must fit within the main VSI. Duplicate region ids are
 * rejected (dup scan loop's body is in elided lines).
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2572 i40e_queue_region_set_region(struct i40e_pf *pf,
2573 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2576 struct i40e_vsi *main_vsi = pf->main_vsi;
2577 struct i40e_queue_regions *info = &pf->queue_region;
2578 int32_t ret = -EINVAL;
2580 if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2581 conf_ptr->queue_num <= 64)) {
2582 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2583 "total number of queues do not exceed the VSI allocation");
2587 if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2588 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2592 if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2593 > main_vsi->nb_used_qps) {
2594 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
/* scan for an existing entry with the same region id */
2598 for (i = 0; i < info->queue_region_number; i++)
2599 if (conf_ptr->region_id == info->region[i].region_id)
/* no duplicate found and room remains: append the new region */
2602 if (i == info->queue_region_number &&
2603 i <= I40E_REGION_MAX_INDEX) {
2604 info->region[i].region_id = conf_ptr->region_id;
2605 info->region[i].queue_num = conf_ptr->queue_num;
2606 info->region[i].queue_start_index =
2607 conf_ptr->queue_start_index;
2608 info->queue_region_number++;
2610 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
/*
 * Bind a HW flow type (PCTYPE index) to an already-configured queue
 * region in the driver-side store. Rejects out-of-range region ids /
 * flow types, unknown regions, and duplicate bindings.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2618 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2619 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2621 int32_t ret = -EINVAL;
2622 struct i40e_queue_regions *info = &pf->queue_region;
2624 uint16_t region_index, flowtype_index;
2626 /* For the pctype or hardware flowtype of packet,
2627 * the specific index for each type has been defined
2628 * in file i40e_type.h as enum i40e_filter_pctype.
2631 if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2632 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2636 if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2637 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
/* the region must have been created by set_region first */
2642 for (i = 0; i < info->queue_region_number; i++)
2643 if (rss_region_conf->region_id == info->region[i].region_id)
2646 if (i == info->queue_region_number) {
2647 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* a flow type may be bound to at most one region */
2653 for (i = 0; i < info->queue_region_number; i++) {
2654 for (j = 0; j < info->region[i].flowtype_num; j++) {
2655 if (rss_region_conf->hw_flowtype ==
2656 info->region[i].hw_flowtype[j]) {
2657 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2663 flowtype_index = info->region[region_index].flowtype_num;
2664 info->region[region_index].hw_flowtype[flowtype_index] =
2665 rss_region_conf->hw_flowtype;
2666 info->region[region_index].flowtype_num++;
/*
 * Commit the stored flow-type -> region bindings to HW: each PFQF_HREGION
 * register packs 8 flow types; index = hw_flowtype / 8 selects the
 * register, hw_flowtype % 8 selects the REGION_n / OVERRIDE_ENA_n field
 * pair (hence the 8-way if/else ladder). Read-modify-write per binding.
 * NOTE(review): braces elided by extraction; code byte-identical.
 */
2672 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2675 uint8_t hw_flowtype;
2676 uint32_t pfqf_hregion;
2677 uint16_t i, j, index;
2678 struct i40e_queue_regions *info = &pf->queue_region;
2680 /* For the pctype or hardware flowtype of packet,
2681 * the specific index for each type has been defined
2682 * in file i40e_type.h as enum i40e_filter_pctype.
2685 for (i = 0; i < info->queue_region_number; i++) {
2686 for (j = 0; j < info->region[i].flowtype_num; j++) {
2687 hw_flowtype = info->region[i].hw_flowtype[j];
2688 index = hw_flowtype >> 3;
2690 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2692 if ((hw_flowtype & 0x7) == 0) {
2693 pfqf_hregion |= info->region[i].region_id <<
2694 I40E_PFQF_HREGION_REGION_0_SHIFT;
2695 pfqf_hregion |= 1 <<
2696 I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2697 } else if ((hw_flowtype & 0x7) == 1) {
2698 pfqf_hregion |= info->region[i].region_id <<
2699 I40E_PFQF_HREGION_REGION_1_SHIFT;
2700 pfqf_hregion |= 1 <<
2701 I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2702 } else if ((hw_flowtype & 0x7) == 2) {
2703 pfqf_hregion |= info->region[i].region_id <<
2704 I40E_PFQF_HREGION_REGION_2_SHIFT;
2705 pfqf_hregion |= 1 <<
2706 I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2707 } else if ((hw_flowtype & 0x7) == 3) {
2708 pfqf_hregion |= info->region[i].region_id <<
2709 I40E_PFQF_HREGION_REGION_3_SHIFT;
2710 pfqf_hregion |= 1 <<
2711 I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2712 } else if ((hw_flowtype & 0x7) == 4) {
2713 pfqf_hregion |= info->region[i].region_id <<
2714 I40E_PFQF_HREGION_REGION_4_SHIFT;
2715 pfqf_hregion |= 1 <<
2716 I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2717 } else if ((hw_flowtype & 0x7) == 5) {
2718 pfqf_hregion |= info->region[i].region_id <<
2719 I40E_PFQF_HREGION_REGION_5_SHIFT;
2720 pfqf_hregion |= 1 <<
2721 I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2722 } else if ((hw_flowtype & 0x7) == 6) {
2723 pfqf_hregion |= info->region[i].region_id <<
2724 I40E_PFQF_HREGION_REGION_6_SHIFT;
2725 pfqf_hregion |= 1 <<
2726 I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2728 pfqf_hregion |= info->region[i].region_id <<
2729 I40E_PFQF_HREGION_REGION_7_SHIFT;
2730 pfqf_hregion |= 1 <<
2731 I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2734 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
/*
 * Bind a user priority (UP, 0..7) to an already-configured queue region
 * in the driver-side store; rejects unknown regions and duplicate UPs.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2741 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2742 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2744 struct i40e_queue_regions *info = &pf->queue_region;
2745 int32_t ret = -EINVAL;
2746 uint16_t i, j, region_index;
2748 if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
/* NOTE(review): message is misleading — this branch checks the user
 * priority, not the queue region index; should read "user priority
 * max index is 7". Fix the string in the canonical source. */
2749 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2753 if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2754 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2758 for (i = 0; i < info->queue_region_number; i++)
2759 if (rss_region_conf->region_id == info->region[i].region_id)
2762 if (i == info->queue_region_number) {
2763 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* a user priority may be bound to at most one region */
2770 for (i = 0; i < info->queue_region_number; i++) {
2771 for (j = 0; j < info->region[i].user_priority_num; j++) {
2772 if (info->region[i].user_priority[j] ==
2773 rss_region_conf->user_priority) {
2774 PMD_DRV_LOG(ERR, "that user priority has been set before");
2780 j = info->region[region_index].user_priority_num;
2781 info->region[region_index].user_priority[j] =
2782 rss_region_conf->user_priority;
2783 info->region[region_index].user_priority_num++;
/*
 * Build a DCBX config from the stored queue regions (one TC per region,
 * equal ETS bandwidth split with the remainder spread over the first TCs,
 * UP->TC priority table from the per-region UP bindings) and program it
 * into the firmware via i40e_set_dcb_config().
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2789 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2792 struct i40e_dcbx_config dcb_cfg_local;
2793 struct i40e_dcbx_config *dcb_cfg;
2794 struct i40e_queue_regions *info = &pf->queue_region;
2795 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2796 int32_t ret = -EINVAL;
2797 uint16_t i, j, prio_index, region_index;
2798 uint8_t tc_map, tc_bw, bw_lf;
2800 if (!info->queue_region_number) {
2801 PMD_DRV_LOG(ERR, "No queue region been set before");
2805 dcb_cfg = &dcb_cfg_local;
2806 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2808 /* assume each tc has the same bw */
2809 tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2810 for (i = 0; i < info->queue_region_number; i++)
2811 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2812 /* to ensure the sum of tcbw is equal to 100 */
2813 bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
2814 for (i = 0; i < bw_lf; i++)
2815 dcb_cfg->etscfg.tcbwtable[i]++;
2817 /* assume each tc has the same Transmission Selection Algorithm */
2818 for (i = 0; i < info->queue_region_number; i++)
2819 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
/* map each bound user priority to its region's TC index */
2821 for (i = 0; i < info->queue_region_number; i++) {
2822 for (j = 0; j < info->region[i].user_priority_num; j++) {
2823 prio_index = info->region[i].user_priority[j];
2824 region_index = info->region[i].region_id;
2825 dcb_cfg->etscfg.prioritytable[prio_index] =
2830 /* FW needs one App to configure HW */
2831 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2832 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2833 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2834 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* enable one TC bit per configured region */
2836 tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2838 dcb_cfg->pfc.willing = 0;
2839 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2840 dcb_cfg->pfc.pfcenable = tc_map;
2842 /* Copy the new config to the current config */
2843 *old_cfg = *dcb_cfg;
2844 old_cfg->etsrec = old_cfg->etscfg;
2845 ret = i40e_set_dcb_config(hw);
2848 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2849 i40e_stat_str(hw, ret),
2850 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Commit (on != 0) or tear down (on == 0) the complete queue region
 * configuration: flow-type registers, VSI TC/queue mapping and DCB. On
 * teardown, the region store is collapsed to one region covering all
 * used queues, the mapping is re-flushed, default DCB is restored and
 * the driver-side region state is re-initialized.
 * NOTE(review): braces/returns elided by extraction; code byte-identical.
 */
2858 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2859 struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2861 int32_t ret = -EINVAL;
2862 struct i40e_queue_regions *info = &pf->queue_region;
2863 struct i40e_vsi *main_vsi = pf->main_vsi;
2866 i40e_queue_region_pf_flowtype_conf(hw, pf);
2868 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2869 if (ret != I40E_SUCCESS) {
2870 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2874 ret = i40e_queue_region_dcb_configure(hw, pf);
2875 if (ret != I40E_SUCCESS) {
2876 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
/* teardown path: fold everything back into a single region */
2883 if (info->queue_region_number) {
2884 info->queue_region_number = 1;
2885 info->region[0].queue_num = main_vsi->nb_used_qps;
2886 info->region[0].queue_start_index = 0;
2888 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2889 if (ret != I40E_SUCCESS)
2890 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2892 ret = i40e_dcb_init_configure(dev, TRUE);
2893 if (ret != I40E_SUCCESS) {
2894 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2895 pf->flags &= ~I40E_FLAG_DCB;
2898 i40e_init_queue_region_conf(dev);
/*
 * Check whether RSS is enabled on the PF by reading the hash-enable
 * (HENA) registers; the 64-bit enable mask is split across two 32-bit
 * registers, I40E_PFQF_HENA(0) (low) and I40E_PFQF_HENA(1) (high).
 *
 * NOTE(review): the return statement is elided from this excerpt —
 * presumably it returns an error when `hena` is zero (no RSS hash types
 * enabled) and success otherwise; confirm against the full source.
 */
2904 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2906 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2909 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2910 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
/*
 * Copy the PF's current queue-region configuration out to the caller.
 *
 * @pf:          PF whose staged queue-region state is read
 * @regions_ptr: caller-provided buffer; receives a full copy of
 *               pf->queue_region (struct i40e_queue_regions)
 */
2919 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2920 struct i40e_queue_regions *regions_ptr)
2922 struct i40e_queue_regions *info = &pf->queue_region;
2924 rte_memcpy(regions_ptr, info,
2925 sizeof(struct i40e_queue_regions));
/*
 * Public PMD API: dispatch a queue-region operation (set region/flowtype/
 * user-priority, flush on/off, or info get) to the matching internal
 * handler, then flush posted register writes.
 *
 * @port_id: ethdev port identifier
 * @op_type: which queue-region operation to perform
 * @arg:     operation-specific argument (rte_pmd_i40e_queue_region_conf
 *           for the SET ops, i40e_queue_regions for INFO_GET)
 *
 * Returns 0 on success or a negative errno-style value.
 *
 * NOTE(review): dev/pf/hw are derived from rte_eth_devices[port_id]
 * *before* RTE_ETH_VALID_PORTID_OR_ERR_RET validates port_id, so an
 * invalid port indexes the array (and dereferences dev->data) before the
 * check can reject it. The validity check should come first — flagging
 * rather than fixing here because several control-flow lines of this
 * function are elided from this excerpt.
 */
2930 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2931 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2933 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2934 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2935 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2938 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2940 if (!is_i40e_supported(dev))
/* Queue regions require RSS to be enabled on the PF first. */
2943 if (!(!i40e_queue_region_pf_check_rss(pf)))
2946 /* This queue region feature only support pf by now. It should
2947 * be called after dev_start, and will be clear after dev_stop.
2948 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2949 * is just an enable function which server for other configuration,
2950 * it is for all configuration about queue region from up layer,
2951 * at first will only keep in DPDK softwarestored in driver,
2952 * only after "FLUSH_ON", it commit all configuration to HW.
2953 * Because PMD had to set hardware configuration at a time, so
2954 * it will record all up layer command at first.
2955 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2956 * just clean all configuration about queue region just now,
2957 * and restore all to DPDK i40e driver default
2958 * config when start up.
/* The SET ops only stage config in software; FLUSH_ON commits it to HW. */
2962 case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2963 ret = i40e_queue_region_set_region(pf,
2964 (struct rte_pmd_i40e_queue_region_conf *)arg);
2966 case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2967 ret = i40e_queue_region_set_flowtype(pf,
2968 (struct rte_pmd_i40e_queue_region_conf *)arg);
2970 case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2971 ret = i40e_queue_region_set_user_priority(pf,
2972 (struct rte_pmd_i40e_queue_region_conf *)arg);
2974 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2975 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2977 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2978 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2980 case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2981 ret = i40e_queue_region_get_all_info(pf,
2982 (struct i40e_queue_regions *)arg);
2985 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
/* Ensure any posted register writes reach the device before returning. */
2990 I40E_WRITE_FLUSH(hw);
2995 int rte_pmd_i40e_flow_add_del_packet_template(
2997 const struct rte_pmd_i40e_pkt_template_conf *conf,
3000 struct rte_eth_dev *dev = &rte_eth_devices[port];
3001 struct i40e_fdir_filter_conf filter_conf;
3003 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3005 if (!is_i40e_supported(dev))
3008 memset(&filter_conf, 0, sizeof(filter_conf));
3009 filter_conf.soft_id = conf->soft_id;
3010 filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3011 filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3012 filter_conf.input.flow.raw_flow.length = conf->input.length;
3013 filter_conf.input.flow_ext.pkt_template = true;
3015 filter_conf.action.rx_queue = conf->action.rx_queue;
3016 filter_conf.action.behavior =
3017 (enum i40e_fdir_behavior)conf->action.behavior;
3018 filter_conf.action.report_status =
3019 (enum i40e_fdir_status)conf->action.report_status;
3020 filter_conf.action.flex_off = conf->action.flex_off;
3022 return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);