4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "base/i40e_dcb.h"
39 #include "i40e_ethdev.h"
41 #include "i40e_rxtx.h"
42 #include "rte_pmd_i40e.h"
/*
 * Notify ("ping") one VF of an i40e port with the current link status.
 * NOTE(review): this excerpt is line-sampled; braces and return statements
 * between the visible lines are not shown.
 */
45 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
47 struct rte_eth_dev *dev;
/* Reject invalid ethdev port ids up front. */
50 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52 dev = &rte_eth_devices[port];
/* This private API only applies to i40e devices. */
54 if (!is_i40e_supported(dev))
57 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* VF index must be in range and SRIOV VFs must exist. */
59 if (vf >= pf->vf_num || !pf->vfs) {
60 PMD_DRV_LOG(ERR, "Invalid argument.");
64 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
/*
 * Enable/disable MAC anti-spoofing on a VF's VSI by toggling the
 * I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK security flag and pushing the
 * updated VSI context to firmware via the admin queue.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
70 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
72 struct rte_eth_dev *dev;
76 struct i40e_vsi_context ctxt;
79 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
81 dev = &rte_eth_devices[port];
83 if (!is_i40e_supported(dev))
86 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
88 if (vf_id >= pf->vf_num || !pf->vfs) {
89 PMD_DRV_LOG(ERR, "Invalid argument.");
93 vsi = pf->vfs[vf_id].vsi;
95 PMD_DRV_LOG(ERR, "Invalid VSI.");
99 /* Check if it has been already on or off */
100 if (vsi->info.valid_sections &
101 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
/* Early-out if the requested state matches the current flag. */
103 if ((vsi->info.sec_flags &
104 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
105 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
106 return 0; /* already on */
108 if ((vsi->info.sec_flags &
109 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
110 return 0; /* already off */
/* Mark only the security section as valid for this update. */
114 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
116 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
118 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
/* Build the AQ context from the cached VSI info and commit to FW. */
120 memset(&ctxt, 0, sizeof(ctxt));
121 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
122 ctxt.seid = vsi->seid;
124 hw = I40E_VSI_TO_HW(vsi);
125 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
126 if (ret != I40E_SUCCESS) {
128 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Walk the VSI's VLAN bitmap (vfta) and add (add != 0) or remove every
 * set VLAN id as a HW VLAN filter via the admin queue.
 * NOTE(review): excerpt is line-sampled; loop braces/returns not shown.
 */
135 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
139 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
140 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
/* One 32-bit word of the vfta bitmap per outer iteration. */
143 for (j = 0; j < I40E_VFTA_SIZE; j++) {
147 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
/* Skip bits that are not set — VLAN not configured. */
148 if (!(vsi->vfta[j] & (1 << k)))
/* Reconstruct the VLAN id from word index and bit position. */
151 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
155 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
157 ret = i40e_aq_add_vlan(hw, vsi->seid,
158 &vlan_data, 1, NULL);
160 ret = i40e_aq_remove_vlan(hw, vsi->seid,
161 &vlan_data, 1, NULL);
162 if (ret != I40E_SUCCESS) {
164 "Failed to add/rm vlan filter");
/*
 * Enable/disable VLAN anti-spoofing on a VF's VSI: mirror the state in
 * vsi->vlan_anti_spoof_on, sync the HW VLAN filters if VLAN filtering is
 * not already active, then toggle I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
 * and push the VSI context to firmware.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
174 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
176 struct rte_eth_dev *dev;
178 struct i40e_vsi *vsi;
180 struct i40e_vsi_context ctxt;
183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
185 dev = &rte_eth_devices[port];
187 if (!is_i40e_supported(dev))
190 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
192 if (vf_id >= pf->vf_num || !pf->vfs) {
193 PMD_DRV_LOG(ERR, "Invalid argument.");
197 vsi = pf->vfs[vf_id].vsi;
199 PMD_DRV_LOG(ERR, "Invalid VSI.");
203 /* Check if it has been already on or off */
204 if (vsi->vlan_anti_spoof_on == on)
205 return 0; /* already on or off */
207 vsi->vlan_anti_spoof_on = on;
/* Only (re)program HW VLAN filters when VLAN filtering is not on yet. */
208 if (!vsi->vlan_filter_on) {
209 ret = i40e_add_rm_all_vlan_filter(vsi, on);
211 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
/* Update only the security section of the VSI context. */
216 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
218 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
220 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
222 memset(&ctxt, 0, sizeof(ctxt));
223 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
224 ctxt.seid = vsi->seid;
226 hw = I40E_VSI_TO_HW(vsi);
227 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
228 if (ret != I40E_SUCCESS) {
230 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Remove all MAC(+VLAN) filters currently attached to the VSI.
 * For MACVLAN filter types, one entry is removed per VLAN the MAC is
 * paired with; for plain MAC types a single entry is removed.
 * Returns I40E_SUCCESS or an I40E_ERR_* code.
 * NOTE(review): excerpt is line-sampled; some braces/frees not shown.
 */
237 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
239 struct i40e_mac_filter *f;
240 struct i40e_macvlan_filter *mv_f;
242 enum rte_mac_filter_type filter_type;
243 int ret = I40E_SUCCESS;
246 /* remove all the MACs */
/* SAFE variant: the current entry is removed while iterating. */
247 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
248 vlan_num = vsi->vlan_num;
249 filter_type = f->mac_info.filter_type;
250 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
251 filter_type == RTE_MACVLAN_HASH_MATCH) {
/* A MACVLAN filter with no VLANs is inconsistent state. */
253 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
254 return I40E_ERR_PARAM;
256 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
257 filter_type == RTE_MAC_HASH_MATCH)
/* One macvlan element per VLAN paired with this MAC. */
260 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
262 PMD_DRV_LOG(ERR, "failed to allocate memory");
263 return I40E_ERR_NO_MEMORY;
266 for (i = 0; i < vlan_num; i++) {
267 mv_f[i].filter_type = filter_type;
268 rte_memcpy(&mv_f[i].macaddr,
269 &f->mac_info.mac_addr,
/* Fill in the VLAN ids associated with this MAC before removal. */
272 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
273 filter_type == RTE_MACVLAN_HASH_MATCH) {
274 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
275 &f->mac_info.mac_addr);
276 if (ret != I40E_SUCCESS) {
282 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
283 if (ret != I40E_SUCCESS) {
/*
 * Re-program (restore) all MAC(+VLAN) filters recorded in the VSI's
 * software mac_list back into hardware — the counterpart of
 * i40e_vsi_rm_mac_filter().
 * NOTE(review): excerpt is line-sampled; some braces/frees not shown.
 */
296 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
298 struct i40e_mac_filter *f;
299 struct i40e_macvlan_filter *mv_f;
301 int ret = I40E_SUCCESS;
304 /* restore all the MACs */
305 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
306 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
307 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
309 * If vlan_num is 0, that's the first time to add mac,
310 * set mask for vlan_id 0.
312 if (vsi->vlan_num == 0) {
313 i40e_set_vlan_filter(vsi, 0, 1);
316 vlan_num = vsi->vlan_num;
317 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
318 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
/* One macvlan element per VLAN paired with this MAC. */
321 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
323 PMD_DRV_LOG(ERR, "failed to allocate memory");
324 return I40E_ERR_NO_MEMORY;
327 for (i = 0; i < vlan_num; i++) {
328 mv_f[i].filter_type = f->mac_info.filter_type;
329 rte_memcpy(&mv_f[i].macaddr,
330 &f->mac_info.mac_addr,
/* Recover the VLAN ids for MACVLAN filter types before re-adding. */
334 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
335 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
336 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
337 &f->mac_info.mac_addr);
338 if (ret != I40E_SUCCESS) {
344 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
345 if (ret != I40E_SUCCESS) {
/*
 * Enable/disable TX loopback (ALLOW_LB switch flag) on one VSI.
 * Requires FW >= v5.0. Because changing the switch flag disturbs the
 * filter state, all MAC and VLAN filters are removed first and restored
 * after the VSI context update succeeds.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
358 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
360 struct i40e_vsi_context ctxt;
367 hw = I40E_VSI_TO_HW(vsi);
369 /* Use the FW API if FW >= v5.0 */
370 if (hw->aq.fw_maj_ver < 5) {
371 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
375 /* Check if it has been already on or off */
376 if (vsi->info.valid_sections &
377 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
/* Early-out when the requested state matches the current flag. */
379 if ((vsi->info.switch_id &
380 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
381 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
382 return 0; /* already on */
384 if ((vsi->info.switch_id &
385 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
386 return 0; /* already off */
390 /* remove all the MAC and VLAN first */
391 ret = i40e_vsi_rm_mac_filter(vsi);
393 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
396 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
399 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
/* Update only the switch section of the VSI context. */
404 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
406 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
408 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
410 memset(&ctxt, 0, sizeof(ctxt));
411 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
412 ctxt.seid = vsi->seid;
414 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
415 if (ret != I40E_SUCCESS) {
416 PMD_DRV_LOG(ERR, "Failed to update VSI params");
420 /* add all the MAC and VLAN back */
421 ret = i40e_vsi_restore_mac_filter(vsi);
424 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
425 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
/*
 * Enable/disable TX loopback on the PF's main VSI and on every VF VSI
 * of the port, by calling i40e_vsi_set_tx_loopback() for each.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
434 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
436 struct rte_eth_dev *dev;
438 struct i40e_pf_vf *vf;
439 struct i40e_vsi *vsi;
443 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
445 dev = &rte_eth_devices[port];
447 if (!is_i40e_supported(dev))
450 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
452 /* setup PF TX loopback */
454 ret = i40e_vsi_set_tx_loopback(vsi, on);
458 /* setup TX loopback for all the VFs */
460 /* if no VF, do nothing. */
464 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
465 vf = &pf->vfs[vf_id];
468 ret = i40e_vsi_set_tx_loopback(vsi, on);
/*
 * Enable/disable unicast promiscuous mode on a VF's VSI via the admin
 * queue (i40e_aq_set_vsi_unicast_promiscuous).
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
477 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
479 struct rte_eth_dev *dev;
481 struct i40e_vsi *vsi;
485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
487 dev = &rte_eth_devices[port];
489 if (!is_i40e_supported(dev))
492 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* Validate VF index and that SRIOV VFs exist. */
494 if (vf_id >= pf->vf_num || !pf->vfs) {
495 PMD_DRV_LOG(ERR, "Invalid argument.");
499 vsi = pf->vfs[vf_id].vsi;
501 PMD_DRV_LOG(ERR, "Invalid VSI.");
505 hw = I40E_VSI_TO_HW(vsi);
507 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
509 if (ret != I40E_SUCCESS) {
511 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
/*
 * Enable/disable multicast promiscuous mode on a VF's VSI via the admin
 * queue (i40e_aq_set_vsi_multicast_promiscuous).
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
518 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
520 struct rte_eth_dev *dev;
522 struct i40e_vsi *vsi;
526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
528 dev = &rte_eth_devices[port];
530 if (!is_i40e_supported(dev))
533 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* Validate VF index and that SRIOV VFs exist. */
535 if (vf_id >= pf->vf_num || !pf->vfs) {
536 PMD_DRV_LOG(ERR, "Invalid argument.");
540 vsi = pf->vfs[vf_id].vsi;
542 PMD_DRV_LOG(ERR, "Invalid VSI.");
546 hw = I40E_VSI_TO_HW(vsi);
548 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
550 if (ret != I40E_SUCCESS) {
552 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
/*
 * Assign a new default MAC address to a VF (host-side): validates the
 * address, records it in the VF state, then removes all existing MAC
 * filters from the VF's VSI.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
559 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
560 struct ether_addr *mac_addr)
562 struct i40e_mac_filter *f;
563 struct rte_eth_dev *dev;
564 struct i40e_pf_vf *vf;
565 struct i40e_vsi *vsi;
/* Reject broadcast/zero/invalid MACs before touching the device. */
569 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
574 dev = &rte_eth_devices[port];
576 if (!is_i40e_supported(dev))
579 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
581 if (vf_id >= pf->vf_num || !pf->vfs)
584 vf = &pf->vfs[vf_id];
587 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Remember the new address in the per-VF software state. */
591 ether_addr_copy(mac_addr, &vf->mac_addr);
593 /* Remove all existing mac */
594 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
595 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
/* Deletion failure is logged but not fatal here. */
597 PMD_DRV_LOG(WARNING, "Delete MAC failed");
602 /* Set vlan strip on/off for specific VF from host */
/*
 * Enable/disable VLAN stripping on the given VF's RX queues by
 * delegating to i40e_vsi_config_vlan_stripping().
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
604 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
606 struct rte_eth_dev *dev;
608 struct i40e_vsi *vsi;
611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
613 dev = &rte_eth_devices[port];
615 if (!is_i40e_supported(dev))
618 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
620 if (vf_id >= pf->vf_num || !pf->vfs) {
621 PMD_DRV_LOG(ERR, "Invalid argument.");
625 vsi = pf->vfs[vf_id].vsi;
/* !!on normalizes any non-zero value to 1. */
630 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
631 if (ret != I40E_SUCCESS) {
633 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
/*
 * Configure port-VLAN (PVID) insertion for a VF: set vsi->info.pvid and
 * toggle I40E_AQ_VSI_PVLAN_INSERT_PVID, then commit the VLAN section of
 * the VSI context to firmware. vlan_id == 0 appears to disable insert —
 * TODO confirm against the unsampled source.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
639 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
642 struct rte_eth_dev *dev;
645 struct i40e_vsi *vsi;
646 struct i40e_vsi_context ctxt;
649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
651 if (vlan_id > ETHER_MAX_VLAN_ID) {
652 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
656 dev = &rte_eth_devices[port];
658 if (!is_i40e_supported(dev))
661 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
662 hw = I40E_PF_TO_HW(pf);
665 * return -ENODEV if SRIOV not enabled, VF number not configured
666 * or no queue assigned.
668 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
672 if (vf_id >= pf->vf_num || !pf->vfs) {
673 PMD_DRV_LOG(ERR, "Invalid VF ID.");
677 vsi = pf->vfs[vf_id].vsi;
679 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Update only the VLAN section of the VSI context. */
683 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
684 vsi->info.pvid = vlan_id;
686 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
688 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
690 memset(&ctxt, 0, sizeof(ctxt));
691 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
692 ctxt.seid = vsi->seid;
694 hw = I40E_VSI_TO_HW(vsi);
695 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
696 if (ret != I40E_SUCCESS) {
698 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Enable/disable broadcast reception for a VF by adding or deleting a
 * perfect-match filter for ff:ff:ff:ff:ff:ff on the VF's VSI.
 * I40E_ERR_PARAM from the add/delete path is tolerated (filter already
 * present/absent).
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
704 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
707 struct rte_eth_dev *dev;
709 struct i40e_vsi *vsi;
711 struct i40e_mac_filter_info filter;
712 struct ether_addr broadcast = {
713 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
719 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
723 dev = &rte_eth_devices[port];
725 if (!is_i40e_supported(dev))
728 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
729 hw = I40E_PF_TO_HW(pf);
731 if (vf_id >= pf->vf_num || !pf->vfs) {
732 PMD_DRV_LOG(ERR, "Invalid VF ID.");
737 * return -ENODEV if SRIOV not enabled, VF number not configured
738 * or no queue assigned.
740 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
741 pf->vf_nb_qps == 0) {
742 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
746 vsi = pf->vfs[vf_id].vsi;
748 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* on: add the broadcast MAC filter; off: delete it. */
753 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
754 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
755 ret = i40e_vsi_add_mac(vsi, &filter);
757 ret = i40e_vsi_delete_mac(vsi, &broadcast);
/* I40E_ERR_PARAM is treated as "already in the requested state". */
760 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
762 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
/*
 * Select whether a VF's TX packets carry VLAN tags: flips between the
 * PVLAN_MODE_TAGGED and PVLAN_MODE_UNTAGGED port-VLAN flags and commits
 * the VLAN section of the VSI context to firmware.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
770 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
772 struct rte_eth_dev *dev;
775 struct i40e_vsi *vsi;
776 struct i40e_vsi_context ctxt;
779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
782 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
786 dev = &rte_eth_devices[port];
788 if (!is_i40e_supported(dev))
791 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
792 hw = I40E_PF_TO_HW(pf);
795 * return -ENODEV if SRIOV not enabled, VF number not configured
796 * or no queue assigned.
798 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
799 pf->vf_nb_qps == 0) {
800 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
804 if (vf_id >= pf->vf_num || !pf->vfs) {
805 PMD_DRV_LOG(ERR, "Invalid VF ID.");
809 vsi = pf->vfs[vf_id].vsi;
811 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* TAGGED and UNTAGGED modes are mutually exclusive flags. */
815 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
817 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
818 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
820 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
821 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
824 memset(&ctxt, 0, sizeof(ctxt));
825 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
826 ctxt.seid = vsi->seid;
828 hw = I40E_VSI_TO_HW(vsi);
829 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
830 if (ret != I40E_SUCCESS) {
832 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Count the VLAN ids set in the VSI's vfta bitmap (same iteration
 * pattern as i40e_add_rm_all_vlan_filter).
 * NOTE(review): excerpt is line-sampled; counter/return lines not shown.
 */
839 i40e_vlan_filter_count(struct i40e_vsi *vsi)
845 for (j = 0; j < I40E_VFTA_SIZE; j++) {
849 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
850 if (!(vsi->vfta[j] & (1 << k)))
853 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
/*
 * Add/remove a VLAN filter on every VF selected by vf_mask (bit i =
 * VF i). Turns VLAN filtering (and VLAN promisc, plus all existing
 * vfta VLANs) on for a VSI the first time a filter is added, and back
 * off when the last filter is removed.
 * NOTE(review): excerpt is line-sampled; braces/arguments not shown.
 */
864 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
865 uint64_t vf_mask, uint8_t on)
867 struct rte_eth_dev *dev;
870 struct i40e_vsi *vsi;
872 int ret = I40E_SUCCESS;
874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
876 dev = &rte_eth_devices[port];
878 if (!is_i40e_supported(dev))
/* VLAN 0 is rejected here, unlike the PVID insert API. */
881 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
882 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
887 PMD_DRV_LOG(ERR, "No VF.");
892 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
896 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
897 hw = I40E_PF_TO_HW(pf);
900 * return -ENODEV if SRIOV not enabled, VF number not configured
901 * or no queue assigned.
903 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
904 pf->vf_nb_qps == 0) {
905 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
/* Stop at the first VF that fails (ret != I40E_SUCCESS). */
909 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
910 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
911 vsi = pf->vfs[vf_idx].vsi;
/* First filter on this VSI: enable filtering mode. */
913 if (!vsi->vlan_filter_on) {
914 vsi->vlan_filter_on = true;
915 i40e_aq_set_vsi_vlan_promisc(hw,
/* Anti-spoof already programmed the vfta VLANs; avoid doing it twice. */
919 if (!vsi->vlan_anti_spoof_on)
920 i40e_add_rm_all_vlan_filter(
923 ret = i40e_vsi_add_vlan(vsi, vlan_id);
925 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
/* Last filter removed: drop back out of filtering mode. */
927 if (!i40e_vlan_filter_count(vsi)) {
928 vsi->vlan_filter_on = false;
929 i40e_aq_set_vsi_vlan_promisc(hw,
938 if (ret != I40E_SUCCESS) {
940 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
/*
 * Fill *stats with the VF VSI's ethernet counters: refreshes the VSI
 * stats from HW, then aggregates uni/multi/broadcast packet counts and
 * maps discards/errors into the rte_eth_stats fields.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
947 rte_pmd_i40e_get_vf_stats(uint16_t port,
949 struct rte_eth_stats *stats)
951 struct rte_eth_dev *dev;
953 struct i40e_vsi *vsi;
955 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
957 dev = &rte_eth_devices[port];
959 if (!is_i40e_supported(dev))
962 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
964 if (vf_id >= pf->vf_num || !pf->vfs) {
965 PMD_DRV_LOG(ERR, "Invalid VF ID.");
969 vsi = pf->vfs[vf_id].vsi;
971 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Pull fresh counters from HW into vsi->eth_stats first. */
975 i40e_update_vsi_stats(vsi);
977 stats->ipackets = vsi->eth_stats.rx_unicast +
978 vsi->eth_stats.rx_multicast +
979 vsi->eth_stats.rx_broadcast;
980 stats->opackets = vsi->eth_stats.tx_unicast +
981 vsi->eth_stats.tx_multicast +
982 vsi->eth_stats.tx_broadcast;
983 stats->ibytes = vsi->eth_stats.rx_bytes;
984 stats->obytes = vsi->eth_stats.tx_bytes;
/* RX discards are reported as input errors. */
985 stats->ierrors = vsi->eth_stats.rx_discards;
986 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
/*
 * Reset a VF's statistics: clearing offset_loaded makes the next
 * i40e_update_vsi_stats() call re-baseline the HW counters.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
992 rte_pmd_i40e_reset_vf_stats(uint16_t port,
995 struct rte_eth_dev *dev;
997 struct i40e_vsi *vsi;
999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1001 dev = &rte_eth_devices[port];
1003 if (!is_i40e_supported(dev))
1006 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1008 if (vf_id >= pf->vf_num || !pf->vfs) {
1009 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1013 vsi = pf->vfs[vf_id].vsi;
1015 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Force the stats baseline to be re-captured on the next update. */
1019 vsi->offset_loaded = false;
1020 i40e_update_vsi_stats(vsi);
/*
 * Set a per-VF maximum bandwidth (Mbps). bw must be <= I40E_QOS_BW_MAX
 * and a multiple of I40E_QOS_BW_GRANULARITY; it is converted to
 * granularity units before being programmed via
 * i40e_aq_config_vsi_bw_limit(). Mutually exclusive with per-TC max
 * bandwidth: rejected if any TC bandwidth limit is already set.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
1026 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1028 struct rte_eth_dev *dev;
1030 struct i40e_vsi *vsi;
1035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1037 dev = &rte_eth_devices[port];
1039 if (!is_i40e_supported(dev))
1042 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1044 if (vf_id >= pf->vf_num || !pf->vfs) {
1045 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1049 vsi = pf->vfs[vf_id].vsi;
1051 PMD_DRV_LOG(ERR, "Invalid VSI.");
1055 if (bw > I40E_QOS_BW_MAX) {
1056 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1061 if (bw % I40E_QOS_BW_GRANULARITY) {
1062 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1063 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps to HW granularity units. */
1067 bw /= I40E_QOS_BW_GRANULARITY;
1069 hw = I40E_VSI_TO_HW(vsi);
1072 if (bw == vsi->bw_info.bw_limit) {
1074 "No change for VF max bandwidth. Nothing to do.");
1079 * VF bandwidth limitation and TC bandwidth limitation cannot be
1080 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1082 * If bw is 0, means disable bandwidth limitation. Then no need to
1083 * check TC bandwidth limitation.
/* Scan for any enabled TC that already has ETS credits configured. */
1086 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1087 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1088 vsi->bw_info.bw_ets_credits[i])
/* Loop exited early => a TC limit exists => conflict. */
1091 if (i != I40E_MAX_TRAFFIC_CLASS) {
1093 "TC max bandwidth has been set on this VF,"
1094 " please disable it first.");
1099 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1102 "Failed to set VF %d bandwidth, err(%d).",
1107 /* Store the configuration. */
1108 vsi->bw_info.bw_limit = (uint16_t)bw;
1109 vsi->bw_info.bw_max = 0;
/*
 * Set the relative TC bandwidth weights for a VF. bw_weight holds one
 * weight (>= 1) per enabled TC, tc_num must equal the number of enabled
 * TCs, and the weights must sum to 100. Skips the AQ call when nothing
 * changes (b_change stays false), otherwise programs the weights with
 * i40e_aq_config_vsi_tc_bw() and caches them in bw_ets_share_credits.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
1115 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1116 uint8_t tc_num, uint8_t *bw_weight)
1118 struct rte_eth_dev *dev;
1120 struct i40e_vsi *vsi;
1122 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
/* Tracks whether any weight actually differs from the cached config. */
1126 bool b_change = false;
1128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1130 dev = &rte_eth_devices[port];
1132 if (!is_i40e_supported(dev))
1135 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1137 if (vf_id >= pf->vf_num || !pf->vfs) {
1138 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1142 vsi = pf->vfs[vf_id].vsi;
1144 PMD_DRV_LOG(ERR, "Invalid VSI.");
1148 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1149 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1150 I40E_MAX_TRAFFIC_CLASS)
1155 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1156 if (vsi->enabled_tc & BIT_ULL(i))
/* Caller must provide exactly one weight per enabled TC. */
1159 if (sum != tc_num) {
1161 "Weight should be set for all %d enabled TCs.",
1167 for (i = 0; i < tc_num; i++) {
1168 if (!bw_weight[i]) {
1170 "The weight should be 1 at least.");
1173 sum += bw_weight[i];
/* Weights are percentages and must total exactly 100. */
1177 "The summary of the TC weight should be 100.");
1182 * Create the configuration for all the TCs.
1184 memset(&tc_bw, 0, sizeof(tc_bw));
1185 tc_bw.tc_valid_bits = vsi->enabled_tc;
/* j indexes the caller's compact bw_weight array over enabled TCs. */
1187 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1188 if (vsi->enabled_tc & BIT_ULL(i)) {
1190 vsi->bw_info.bw_ets_share_credits[i])
1193 tc_bw.tc_bw_credits[i] = bw_weight[j];
1201 "No change for TC allocated bandwidth."
1206 hw = I40E_VSI_TO_HW(vsi);
1208 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1211 "Failed to set VF %d TC bandwidth weight, err(%d).",
1216 /* Store the configuration. */
1218 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1219 if (vsi->enabled_tc & BIT_ULL(i)) {
1220 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
/*
 * Set a maximum bandwidth (Mbps) for one traffic class of a VF.
 * Validates range/granularity/TC number, auto-disables a conflicting
 * per-VF bandwidth limit, then programs the full per-TC credit table
 * (incremental updates are not permitted by the AQ command) via
 * i40e_aq_config_vsi_ets_sla_bw_limit().
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
1229 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1230 uint8_t tc_no, uint32_t bw)
1232 struct rte_eth_dev *dev;
1234 struct i40e_vsi *vsi;
1236 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1240 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1242 dev = &rte_eth_devices[port];
1244 if (!is_i40e_supported(dev))
1247 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1249 if (vf_id >= pf->vf_num || !pf->vfs) {
1250 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1254 vsi = pf->vfs[vf_id].vsi;
1256 PMD_DRV_LOG(ERR, "Invalid VSI.");
1260 if (bw > I40E_QOS_BW_MAX) {
1261 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1266 if (bw % I40E_QOS_BW_GRANULARITY) {
1267 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1268 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps to HW granularity units. */
1272 bw /= I40E_QOS_BW_GRANULARITY;
1274 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1275 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1276 I40E_MAX_TRAFFIC_CLASS);
1280 hw = I40E_VSI_TO_HW(vsi);
1282 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1283 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1289 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1291 "No change for TC max bandwidth. Nothing to do.");
1296 * VF bandwidth limitation and TC bandwidth limitation cannot be
1297 * enabled in parallel, disable VF bandwidth limitation if it's
1299 * If bw is 0, means disable bandwidth limitation. Then no need to
1300 * care about VF bandwidth limitation configuration.
/* Clear a conflicting VF-wide limit before setting a TC limit. */
1302 if (bw && vsi->bw_info.bw_limit) {
1303 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1306 "Failed to disable VF(%d)"
1307 " bandwidth limitation, err(%d).",
1313 "VF max bandwidth is disabled according"
1314 " to TC max bandwidth setting.");
1318 * Get all the TCs' info to create a whole picture.
1319 * Because the incremental change isn't permitted.
1321 memset(&tc_bw, 0, sizeof(tc_bw));
1322 tc_bw.tc_valid_bits = vsi->enabled_tc;
/* Reload the cached credits of every enabled TC into the request. */
1323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1324 if (vsi->enabled_tc & BIT_ULL(i)) {
1325 tc_bw.tc_bw_credits[i] =
1327 vsi->bw_info.bw_ets_credits[i]);
/* Overwrite just the target TC with the new limit. */
1330 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1332 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1335 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1340 /* Store the configuration. */
1341 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
/*
 * Configure strict-priority scheduling for the TCs in tc_map on the
 * port's switching component (VEB). tc_map must be a subset of the
 * VEB's enabled TCs. DCBx (LLDP) is stopped the first time any strict
 * priority is enabled and restarted when tc_map returns to 0, because
 * DCBx would otherwise override the manual ETS configuration.
 * NOTE(review): excerpt is line-sampled; braces/returns not shown.
 */
1347 rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
1349 struct rte_eth_dev *dev;
1351 struct i40e_vsi *vsi;
1352 struct i40e_veb *veb;
1354 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1358 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1360 dev = &rte_eth_devices[port];
1362 if (!is_i40e_supported(dev))
1365 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1369 PMD_DRV_LOG(ERR, "Invalid VSI.");
1375 PMD_DRV_LOG(ERR, "Invalid VEB.");
/* tc_map must not request TCs the VEB doesn't have enabled. */
1379 if ((tc_map & veb->enabled_tc) != tc_map) {
1381 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1386 if (tc_map == veb->strict_prio_tc) {
1387 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1391 hw = I40E_VSI_TO_HW(vsi);
1393 /* Disable DCBx if it's the first time to set strict priority. */
1394 if (!veb->strict_prio_tc) {
1395 ret = i40e_aq_stop_lldp(hw, true, NULL);
1398 "Failed to disable DCBx as it's already"
1402 "DCBx is disabled according to strict"
1403 " priority setting.");
/* Build the full ETS configuration for the switching component. */
1406 memset(&ets_data, 0, sizeof(ets_data));
1407 ets_data.tc_valid_bits = veb->enabled_tc;
1408 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1409 ets_data.tc_strict_priority_flags = tc_map;
1410 /* Get all TCs' bandwidth. */
1411 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1412 if (veb->enabled_tc & BIT_ULL(i)) {
1413 /* For rubust, if bandwidth is 0, use 1 instead. */
1414 if (veb->bw_info.bw_ets_share_credits[i])
1415 ets_data.tc_bw_share_credits[i] =
1416 veb->bw_info.bw_ets_share_credits[i];
1418 ets_data.tc_bw_share_credits[i] =
1419 I40E_QOS_BW_WEIGHT_MIN;
/* Choose the AQ opcode: enable on first use, else modify/disable. */
1423 if (!veb->strict_prio_tc)
1424 ret = i40e_aq_config_switch_comp_ets(
1425 hw, veb->uplink_seid,
1426 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1429 ret = i40e_aq_config_switch_comp_ets(
1430 hw, veb->uplink_seid,
1431 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1434 ret = i40e_aq_config_switch_comp_ets(
1435 hw, veb->uplink_seid,
1436 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1441 "Failed to set TCs' strict priority mode."
1446 veb->strict_prio_tc = tc_map;
1448 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1450 ret = i40e_aq_start_lldp(hw, NULL);
1453 "Failed to enable DCBx, err(%d).", ret);
1458 "DCBx is enabled again according to strict"
1459 " priority setting.");
1465 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1466 #define I40E_MAX_PROFILE_NUM 16
/*
 * Build a DDP "profile info" section in profile_info_sec: a section
 * header (type SECTION_TYPE_INFO) followed by an i40e_profile_info
 * record carrying the profile name, version, track_id and the op
 * (ADD_TRACKID or REMOVE_TRACKID depending on the final, unsampled
 * parameter).
 * NOTE(review): excerpt is line-sampled; the add/remove selector
 * parameter and some braces are not shown.
 */
1469 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1470 uint32_t track_id, uint8_t *profile_info_sec,
1473 struct i40e_profile_section_header *sec = NULL;
1474 struct i40e_profile_info *pinfo;
/* The caller-provided buffer starts with the section header. */
1476 sec = (struct i40e_profile_section_header *)profile_info_sec;
1478 sec->data_end = sizeof(struct i40e_profile_section_header) +
1479 sizeof(struct i40e_profile_info);
1480 sec->section.type = SECTION_TYPE_INFO;
1481 sec->section.offset = sizeof(struct i40e_profile_section_header);
1482 sec->section.size = sizeof(struct i40e_profile_info);
/* The profile info record immediately follows the header. */
1483 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1484 sec->section.offset);
1485 pinfo->track_id = track_id;
1486 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1487 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1489 pinfo->op = I40E_DDP_ADD_TRACKID;
1491 pinfo->op = I40E_DDP_REMOVE_TRACKID;
/*
 * Write a previously built profile-info section (see
 * i40e_generate_profile_info_sec) to the device with
 * i40e_aq_write_ddp(), registering or un-registering the track_id.
 * NOTE(review): excerpt is line-sampled; braces/return not shown.
 */
1494 static enum i40e_status_code
1495 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1497 enum i40e_status_code status = I40E_SUCCESS;
1498 struct i40e_profile_section_header *sec;
1500 uint32_t offset = 0;
1503 sec = (struct i40e_profile_section_header *)profile_info_sec;
/* Pull the track_id back out of the embedded profile info record. */
1504 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1505 sec->section.offset))->track_id;
1507 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1508 track_id, &offset, &info, NULL);
1510 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1511 "offset %d, info %d",
1518 /* Check if the profile info exists */
/*
 * Query the device's loaded-profile list and classify the candidate
 * profile in profile_info_sec. Distinct results are produced for:
 * read-only profile (track_id == 0), exact track_id already loaded,
 * a group-0 profile present, and a profile from a different group
 * present — the numeric codes are returned on lines not shown in this
 * sampled excerpt (callers treat them as 1/2/3; TODO confirm).
 */
1519 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1521 struct rte_eth_dev *dev = &rte_eth_devices[port];
1522 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524 struct rte_pmd_i40e_profile_list *p_list;
1525 struct rte_pmd_i40e_profile_info *pinfo, *p;
/* Bits 16-23 of track_id identify the profile group. */
1528 static const uint32_t group_mask = 0x00ff0000;
1530 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1531 sizeof(struct i40e_profile_section_header));
1532 if (pinfo->track_id == 0) {
1533 PMD_DRV_LOG(INFO, "Read-only profile.");
/* Buffer sized for the max profile list plus the 4-byte count. */
1536 buff = rte_zmalloc("pinfo_list",
1537 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1540 PMD_DRV_LOG(ERR, "failed to allocate memory");
1544 ret = i40e_aq_get_ddp_list(
1546 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1549 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1553 p_list = (struct rte_pmd_i40e_profile_list *)buff;
/* Pass 1: exact track_id match. */
1554 for (i = 0; i < p_list->p_count; i++) {
1555 p = &p_list->p_info[i];
1556 if (pinfo->track_id == p->track_id) {
1557 PMD_DRV_LOG(INFO, "Profile exists.");
/* Pass 2: any profile from group 0 present. */
1562 for (i = 0; i < p_list->p_count; i++) {
1563 p = &p_list->p_info[i];
1564 if ((p->track_id & group_mask) == 0) {
1565 PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
/* Pass 3: any profile from a different group present. */
1570 for (i = 0; i < p_list->p_count; i++) {
1571 p = &p_list->p_info[i];
1572 if ((pinfo->track_id & group_mask) !=
1573 (p->track_id & group_mask)) {
1574 PMD_DRV_LOG(INFO, "Profile of different group exists.");
/*
 * Load (add), overwrite, or delete a DDP (Dynamic Device Personalization)
 * profile package on the device.
 *
 * @param port  DPDK port id of an i40e device.
 * @param buff  Package image; must hold at least the package header, one
 *              metadata segment and two trailing uint32_t words.
 * @param op    One of RTE_PMD_I40E_PKG_OP_WR_ADD / _WR_ONLY / _WR_DEL.
 *
 * Returns 0 on success, negative errno / i40e status on failure.
 * NOTE(review): interior lines are elided in this excerpt (numbering gaps);
 * error-return statements after each PMD_DRV_LOG are among the elided lines.
 */
1585 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1587 enum rte_pmd_i40e_package_op op)
1589 struct rte_eth_dev *dev;
1591 struct i40e_package_header *pkg_hdr;
1592 struct i40e_generic_seg_header *profile_seg_hdr;
1593 struct i40e_generic_seg_header *metadata_seg_hdr;
1595 uint8_t *profile_info_sec;
1597 enum i40e_status_code status = I40E_SUCCESS;
/* Upper byte of track_id encodes the profile "type"/group. */
1598 static const uint32_t type_mask = 0xff000000;
/* Only add/overwrite/delete operations are supported. */
1600 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1601 op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1602 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1603 PMD_DRV_LOG(ERR, "Operation not supported.");
1607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1609 dev = &rte_eth_devices[port];
1611 if (!is_i40e_supported(dev))
1614 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Minimal sanity check on the package size before touching headers. */
1616 if (size < (sizeof(struct i40e_package_header) +
1617 sizeof(struct i40e_metadata_segment) +
1618 sizeof(uint32_t) * 2)) {
1619 PMD_DRV_LOG(ERR, "Buff is invalid.");
1623 pkg_hdr = (struct i40e_package_header *)buff;
1626 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
/* A valid package carries at least metadata + i40e profile segments. */
1630 if (pkg_hdr->segment_count < 2) {
1631 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1635 i40e_update_customized_info(dev, buff, size);
1637 /* Find metadata segment */
1638 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1640 if (!metadata_seg_hdr) {
1641 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1644 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1645 if (track_id == I40E_DDP_TRACKID_INVALID) {
1646 PMD_DRV_LOG(ERR, "Invalid track_id");
1650 /* force read-only track_id for type 0 */
1651 if ((track_id & type_mask) == 0)
1654 /* Find profile segment */
1655 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1657 if (!profile_seg_hdr) {
1658 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
/* Scratch section used to query/update the device's profile info list. */
1662 profile_info_sec = rte_zmalloc(
1663 "i40e_profile_info",
1664 sizeof(struct i40e_profile_section_header) +
1665 sizeof(struct i40e_profile_info),
1667 if (!profile_info_sec) {
1668 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1672 /* Check if the profile already loaded */
1673 i40e_generate_profile_info_sec(
1674 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1675 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1676 track_id, profile_info_sec,
1677 op == RTE_PMD_I40E_PKG_OP_WR_ADD);
/* is_exist: 0 = not loaded, 1 = same profile, 2 = group 0, 3 = other group
 * (semantics per i40e_check_profile_info, defined earlier in this file). */
1678 is_exist = i40e_check_profile_info(port, profile_info_sec);
1680 PMD_DRV_LOG(ERR, "Failed to check profile.");
1681 rte_free(profile_info_sec);
/* Reject conflicting adds; reject deletes of a profile that isn't loaded. */
1685 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1688 PMD_DRV_LOG(ERR, "Profile already exists.");
1689 else if (is_exist == 2)
1690 PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
1691 else if (is_exist == 3)
1692 PMD_DRV_LOG(ERR, "Profile of different group already exists");
1693 rte_free(profile_info_sec);
1696 } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1697 if (is_exist != 1) {
1698 PMD_DRV_LOG(ERR, "Profile does not exist.");
1699 rte_free(profile_info_sec);
/* Delete uses rollback; add/overwrite write the profile to the device. */
1704 if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1705 status = i40e_rollback_profile(
1707 (struct i40e_profile_segment *)profile_seg_hdr,
1710 PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
1711 rte_free(profile_info_sec);
1715 status = i40e_write_profile(
1717 (struct i40e_profile_segment *)profile_seg_hdr,
1720 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1721 PMD_DRV_LOG(ERR, "Failed to write profile for add.");
1723 PMD_DRV_LOG(ERR, "Failed to write profile.");
1724 rte_free(profile_info_sec);
/* Keep the device's loaded-profile list in sync, except for WR_ONLY
 * (overwrite) and read-only (track_id forced to 0 above) profiles. */
1729 if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
1730 /* Modify loaded profiles info list */
1731 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1733 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1734 PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
1736 PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
1740 rte_free(profile_info_sec);
1744 /* Get number of tlv records in the section */
1746 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1748 unsigned int i, nb_rec, nb_tlv = 0;
1749 struct i40e_profile_tlv_section_record *tlv;
1754 /* get number of records in the section */
/* Upper bound on records if every record were a single fixed-size cell. */
1755 nb_rec = sec->section.size /
1756 sizeof(struct i40e_profile_tlv_section_record);
/* No i++ here: the loop index is advanced inside the body (elided in this
 * excerpt) — presumably by tlv->len, since TLV records are variable-length.
 * TODO(review): confirm against the full source. */
1757 for (i = 0; i < nb_rec; ) {
/* Records start immediately after the section header (&sec[1]). */
1758 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
/*
 * Query information from a DDP package image (not from the device).
 *
 * @param pkg_buff   Package image buffer.
 * @param pkg_size   Size of pkg_buff in bytes.
 * @param info_buff  Output buffer; layout depends on @type.
 * @param info_size  Size of info_buff in bytes.
 * @param type       Which piece of package information to extract.
 *
 * Returns I40E_SUCCESS on success, negative errno on bad arguments or
 * unknown @type.
 *
 * NOTE(review): interior lines are elided in this excerpt (numbering gaps);
 * the error-return statements after the PMD_DRV_LOG calls are elided.
 * FIX: line 1855 contained mojibake "¬e_seg_hdr" — an HTML-entity
 * corruption ("&not" -> U+00AC) of "&note_seg_hdr"; restored below.
 */
1765 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1766 uint8_t *info_buff, uint32_t info_size,
1767 enum rte_pmd_i40e_package_info type)
1770 struct i40e_package_header *pkg_hdr;
1771 struct i40e_generic_seg_header *i40e_seg_hdr;
1772 struct i40e_generic_seg_header *note_seg_hdr;
1773 struct i40e_generic_seg_header *metadata_seg_hdr;
1776 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
/* Package must hold header + metadata segment + two trailing words. */
1780 if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1781 sizeof(struct i40e_metadata_segment) +
1782 sizeof(uint32_t) * 2)) {
1783 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1787 pkg_hdr = (struct i40e_package_header *)pkg_buff;
1788 if (pkg_hdr->segment_count < 2) {
1789 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1793 /* Find metadata segment */
1794 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1797 /* Find global notes segment */
1798 note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1801 /* Find i40e profile segment */
1802 i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1804 /* get global header info */
1805 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1806 struct rte_pmd_i40e_profile_info *info =
1807 (struct rte_pmd_i40e_profile_info *)info_buff;
1809 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1810 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1814 if (!metadata_seg_hdr) {
1815 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1819 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1820 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1822 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1825 ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1826 I40E_DDP_NAME_SIZE);
1827 memcpy(&info->version,
1828 &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1829 sizeof(struct i40e_ddp_version));
1830 return I40E_SUCCESS;
1833 /* get global note size */
1834 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1835 if (info_size < sizeof(uint32_t)) {
1836 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1839 if (note_seg_hdr == NULL)
1842 ret_size = note_seg_hdr->size;
1843 *(uint32_t *)info_buff = ret_size;
1844 return I40E_SUCCESS;
1847 /* get global note */
1848 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1849 if (note_seg_hdr == NULL)
1851 if (info_size < note_seg_hdr->size) {
1852 PMD_DRV_LOG(ERR, "Information buffer size is too small");
/* Note text starts right after the generic segment header (&note_seg_hdr[1]).
 * (Mojibake "¬e_seg_hdr" repaired here.) */
1855 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1856 return I40E_SUCCESS;
1859 /* get i40e segment header info */
1860 if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1861 struct rte_pmd_i40e_profile_info *info =
1862 (struct rte_pmd_i40e_profile_info *)info_buff;
1864 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1865 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1869 if (!metadata_seg_hdr) {
1870 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1874 if (!i40e_seg_hdr) {
1875 PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1879 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1880 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1882 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1885 ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1886 I40E_DDP_NAME_SIZE);
1887 memcpy(&info->version,
1888 &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1889 sizeof(struct i40e_ddp_version));
1890 return I40E_SUCCESS;
1893 /* get number of devices */
1894 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1895 if (info_size < sizeof(uint32_t)) {
1896 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1899 *(uint32_t *)info_buff =
1900 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1901 return I40E_SUCCESS;
1904 /* get list of devices */
1905 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1908 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1909 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1910 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1914 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1915 sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1916 return I40E_SUCCESS;
1919 /* get number of protocols */
1920 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1921 struct i40e_profile_section_header *proto;
1923 if (info_size < sizeof(uint32_t)) {
1924 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1927 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1928 (struct i40e_profile_segment *)i40e_seg_hdr);
1929 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1930 return I40E_SUCCESS;
1933 /* get list of protocols */
1934 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1935 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1936 struct rte_pmd_i40e_proto_info *pinfo;
1937 struct i40e_profile_section_header *proto;
1938 struct i40e_profile_tlv_section_record *tlv;
/* Pre-fill the whole output with "unused" entries. */
1940 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1941 nb_proto_info = info_size /
1942 sizeof(struct rte_pmd_i40e_proto_info);
1943 for (i = 0; i < nb_proto_info; i++) {
1944 pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1945 memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1947 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1948 (struct i40e_profile_segment *)i40e_seg_hdr);
1949 nb_tlv = i40e_get_tlv_section_size(proto);
1951 return I40E_SUCCESS;
1952 if (nb_proto_info < nb_tlv) {
1953 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1956 /* get number of records in the section */
1957 nb_rec = proto->section.size /
1958 sizeof(struct i40e_profile_tlv_section_record);
1959 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
/* TLV records are variable length: i advances by tlv->len (elided). */
1960 for (i = j = 0; i < nb_rec; j++) {
1961 pinfo[j].proto_id = tlv->data[0];
1962 snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1963 (const char *)&tlv->data[1]);
1965 tlv = &tlv[tlv->len];
1967 return I40E_SUCCESS;
1970 /* get number of packet classification types */
1971 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1972 struct i40e_profile_section_header *pctype;
1974 if (info_size < sizeof(uint32_t)) {
1975 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1978 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1979 (struct i40e_profile_segment *)i40e_seg_hdr);
1980 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1981 return I40E_SUCCESS;
1984 /* get list of packet classification types */
1985 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1986 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1987 struct rte_pmd_i40e_ptype_info *pinfo;
1988 struct i40e_profile_section_header *pctype;
1989 struct i40e_profile_tlv_section_record *tlv;
1991 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1992 nb_proto_info = info_size /
1993 sizeof(struct rte_pmd_i40e_ptype_info);
1994 for (i = 0; i < nb_proto_info; i++)
1995 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1996 sizeof(struct rte_pmd_i40e_ptype_info));
1997 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1998 (struct i40e_profile_segment *)i40e_seg_hdr);
1999 nb_tlv = i40e_get_tlv_section_size(pctype);
2001 return I40E_SUCCESS;
2002 if (nb_proto_info < nb_tlv) {
2003 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2007 /* get number of records in the section */
2008 nb_rec = pctype->section.size /
2009 sizeof(struct i40e_profile_tlv_section_record);
2010 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2011 for (i = j = 0; i < nb_rec; j++) {
2012 memcpy(&pinfo[j], tlv->data,
2013 sizeof(struct rte_pmd_i40e_ptype_info));
2015 tlv = &tlv[tlv->len];
2017 return I40E_SUCCESS;
2020 /* get number of packet types */
2021 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2022 struct i40e_profile_section_header *ptype;
2024 if (info_size < sizeof(uint32_t)) {
2025 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2028 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2029 (struct i40e_profile_segment *)i40e_seg_hdr);
2030 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2031 return I40E_SUCCESS;
2034 /* get list of packet types */
2035 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2036 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2037 struct rte_pmd_i40e_ptype_info *pinfo;
2038 struct i40e_profile_section_header *ptype;
2039 struct i40e_profile_tlv_section_record *tlv;
2041 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2042 nb_proto_info = info_size /
2043 sizeof(struct rte_pmd_i40e_ptype_info);
2044 for (i = 0; i < nb_proto_info; i++)
2045 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2046 sizeof(struct rte_pmd_i40e_ptype_info));
2047 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2048 (struct i40e_profile_segment *)i40e_seg_hdr);
2049 nb_tlv = i40e_get_tlv_section_size(ptype);
2051 return I40E_SUCCESS;
2052 if (nb_proto_info < nb_tlv) {
2053 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2056 /* get number of records in the section */
2057 nb_rec = ptype->section.size /
2058 sizeof(struct i40e_profile_tlv_section_record);
2059 for (i = j = 0; i < nb_rec; j++) {
2060 tlv = (struct i40e_profile_tlv_section_record *)
2062 memcpy(&pinfo[j], tlv->data,
2063 sizeof(struct rte_pmd_i40e_ptype_info));
2066 return I40E_SUCCESS;
/* Fall-through: no handler matched the requested info type. */
2069 PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
/*
 * Read the list of DDP profiles currently loaded on the device via the
 * AdminQ "get ddp list" command.
 *
 * @param port  DPDK port id of an i40e device.
 * @param buff  Output buffer, at least
 *              I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4 bytes
 *              (profile entries plus a leading count word).
 * @param size  Size of buff in bytes.
 *
 * NOTE(review): interior lines (error returns, final return) are elided
 * in this excerpt.
 */
2074 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2076 struct rte_eth_dev *dev;
2078 enum i40e_status_code status = I40E_SUCCESS;
2080 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2082 dev = &rte_eth_devices[port];
2084 if (!is_i40e_supported(dev))
2087 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2090 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2092 status = i40e_aq_get_ddp_list(hw, (void *)buff,
/*
 * Validate a software packet-type value (RTE_PTYPE_* combination).
 *
 * Each protocol layer field extracted from @pkt_type must be either zero
 * (unset) or one of the values this driver can report.
 *
 * @param pkt_type  Software ptype to validate (RTE_PTYPE_* bit layout).
 * @return 0 when valid; non-zero otherwise (returns elided in excerpt).
 *
 * FIX: the tunnel check listed RTE_PTYPE_TUNNEL_GRENAT twice; the
 * redundant duplicate comparison is removed (no behavior change).
 */
2098 static int check_invalid_pkt_type(uint32_t pkt_type)
2100 uint32_t l2, l3, l4, tnl, il2, il3, il4;
/* Split the combined ptype into per-layer fields. */
2102 l2 = pkt_type & RTE_PTYPE_L2_MASK;
2103 l3 = pkt_type & RTE_PTYPE_L3_MASK;
2104 l4 = pkt_type & RTE_PTYPE_L4_MASK;
2105 tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2106 il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2107 il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2108 il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
/* Outer L2 */
2111 l2 != RTE_PTYPE_L2_ETHER &&
2112 l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2113 l2 != RTE_PTYPE_L2_ETHER_ARP &&
2114 l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2115 l2 != RTE_PTYPE_L2_ETHER_NSH &&
2116 l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2117 l2 != RTE_PTYPE_L2_ETHER_QINQ)
/* Outer L3 */
2121 l3 != RTE_PTYPE_L3_IPV4 &&
2122 l3 != RTE_PTYPE_L3_IPV4_EXT &&
2123 l3 != RTE_PTYPE_L3_IPV6 &&
2124 l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2125 l3 != RTE_PTYPE_L3_IPV6_EXT &&
2126 l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
/* Outer L4 */
2130 l4 != RTE_PTYPE_L4_TCP &&
2131 l4 != RTE_PTYPE_L4_UDP &&
2132 l4 != RTE_PTYPE_L4_FRAG &&
2133 l4 != RTE_PTYPE_L4_SCTP &&
2134 l4 != RTE_PTYPE_L4_ICMP &&
2135 l4 != RTE_PTYPE_L4_NONFRAG)
/* Tunnel type (duplicate GRENAT comparison removed) */
2139 tnl != RTE_PTYPE_TUNNEL_IP &&
2140 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2141 tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2142 tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2143 tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2145 tnl != RTE_PTYPE_TUNNEL_GTPC &&
2146 tnl != RTE_PTYPE_TUNNEL_GTPU)
/* Inner L2 */
2150 il2 != RTE_PTYPE_INNER_L2_ETHER &&
2151 il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2152 il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
/* Inner L3 */
2156 il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2157 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2158 il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2159 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2160 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2161 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
/* Inner L4 */
2165 il4 != RTE_PTYPE_INNER_L4_TCP &&
2166 il4 != RTE_PTYPE_INNER_L4_UDP &&
2167 il4 != RTE_PTYPE_INNER_L4_FRAG &&
2168 il4 != RTE_PTYPE_INNER_L4_SCTP &&
2169 il4 != RTE_PTYPE_INNER_L4_ICMP &&
2170 il4 != RTE_PTYPE_INNER_L4_NONFRAG)
/*
 * Validate a user-supplied hw-ptype -> sw-ptype mapping table.
 *
 * Rejects entries whose hardware index is out of table range, whose
 * software ptype is UNKNOWN, that use the user-define bit, or whose
 * software ptype fails check_invalid_pkt_type().
 * (Per-entry error returns are elided in this excerpt.)
 */
2176 static int check_invalid_ptype_mapping(
2177 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2182 for (i = 0; i < count; i++) {
2183 uint16_t ptype = mapping_table[i].hw_ptype;
2184 uint32_t pkt_type = mapping_table[i].sw_ptype;
/* hw index must fit the driver's ptype table */
2186 if (ptype >= I40E_MAX_PKT_TYPE)
2189 if (pkt_type == RTE_PTYPE_UNKNOWN)
2192 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2195 if (check_invalid_pkt_type(pkt_type))
/*
 * Install user-supplied hw->sw ptype mapping entries into the adapter's
 * ptype table. An "exclusive" flag (parameter elided in this excerpt)
 * presumably controls whether the table is first reset to UNKNOWN —
 * TODO(review): confirm against the full source.
 */
2203 rte_pmd_i40e_ptype_mapping_update(
2205 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2209 struct rte_eth_dev *dev;
2210 struct i40e_adapter *ad;
2213 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2215 dev = &rte_eth_devices[port];
2217 if (!is_i40e_supported(dev))
2220 if (count > I40E_MAX_PKT_TYPE)
/* Validate the whole table before touching driver state. */
2223 if (check_invalid_ptype_mapping(mapping_items, count))
2226 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Clear the table to UNKNOWN (conditional guard elided). */
2229 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2230 ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
/* Apply the requested entries. */
2233 for (i = 0; i < count; i++)
2234 ad->ptype_tbl[mapping_items[i].hw_ptype]
2235 = mapping_items[i].sw_ptype;
/*
 * Restore the driver's default hw->sw ptype mapping table for @port.
 * Returns -ENODEV / -ENOTSUP on bad port (returns elided in excerpt).
 */
2240 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2242 struct rte_eth_dev *dev;
2244 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2246 dev = &rte_eth_devices[port];
2248 if (!is_i40e_supported(dev))
2251 i40e_set_default_ptype_table(dev);
/*
 * Copy the adapter's current hw->sw ptype mapping into @mapping_items.
 * With valid_only set, entries still mapped to RTE_PTYPE_UNKNOWN are
 * skipped. (Output-capacity check and count output are elided here.)
 */
2256 int rte_pmd_i40e_ptype_mapping_get(
2258 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2263 struct rte_eth_dev *dev;
2264 struct i40e_adapter *ad;
2268 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2270 dev = &rte_eth_devices[port];
2272 if (!is_i40e_supported(dev))
2275 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2277 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2280 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2282 mapping_items[n].hw_ptype = i;
2283 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
/*
 * Replace occurrences of @target in the adapter's ptype table with
 * @pkt_type. With @mask set, any entry whose bits are a non-empty subset
 * of @target is replaced; otherwise only exact matches are.
 */
2291 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2296 struct rte_eth_dev *dev;
2297 struct i40e_adapter *ad;
2300 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2302 dev = &rte_eth_devices[port];
2304 if (!is_i40e_supported(dev))
/* In exact-match mode the target itself must be a valid ptype. */
2307 if (!mask && check_invalid_pkt_type(target))
2310 if (check_invalid_pkt_type(pkt_type))
2313 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2315 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
/* masked mode: entry bits all inside target, and overlapping it */
2317 if ((target | ad->ptype_tbl[i]) == target &&
2318 (target & ad->ptype_tbl[i]))
2319 ad->ptype_tbl[i] = pkt_type;
/* exact-match mode */
2321 if (ad->ptype_tbl[i] == target)
2322 ad->ptype_tbl[i] = pkt_type;
/*
 * Add a MAC filter for the given VF's VSI (perfect-match, no VLAN).
 *
 * @param port      DPDK port id of the PF.
 * @param vf_id     Index into the PF's VF array.
 * @param mac_addr  Unicast MAC to add; validated before use.
 *
 * Returns 0 on success, negative errno on failure (error returns after
 * the log calls are elided in this excerpt).
 */
2330 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2331 struct ether_addr *mac_addr)
2333 struct rte_eth_dev *dev;
2334 struct i40e_pf_vf *vf;
2335 struct i40e_vsi *vsi;
2337 struct i40e_mac_filter_info mac_filter;
2340 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2343 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2345 dev = &rte_eth_devices[port];
2347 if (!is_i40e_supported(dev))
2350 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* VF must exist and VF array must be allocated. */
2352 if (vf_id >= pf->vf_num || !pf->vfs)
2355 vf = &pf->vfs[vf_id];
2358 PMD_DRV_LOG(ERR, "Invalid VSI.");
2362 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2363 ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2364 ret = i40e_vsi_add_mac(vsi, &mac_filter);
2365 if (ret != I40E_SUCCESS) {
2366 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
/*
 * Restore the default flow-type -> pctype mapping table for @port.
 * Returns -ENODEV / -ENOTSUP on bad port (returns elided in excerpt).
 */
2373 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2375 struct rte_eth_dev *dev;
2377 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2379 dev = &rte_eth_devices[port];
2381 if (!is_i40e_supported(dev))
2384 i40e_set_default_pctype_table(dev);
/*
 * Copy the adapter's flow-type -> pctype bitmap table into
 * @mapping_items; one entry per flow type, indexed by flow type.
 * Caller must provide room for I40E_FLOW_TYPE_MAX entries.
 */
2389 int rte_pmd_i40e_flow_type_mapping_get(
2391 struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2393 struct rte_eth_dev *dev;
2394 struct i40e_adapter *ad;
2397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2399 dev = &rte_eth_devices[port];
2401 if (!is_i40e_supported(dev))
2404 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2406 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2407 mapping_items[i].flow_type = i;
2408 mapping_items[i].pctype = ad->pctypes_tbl[i];
/*
 * Install user-supplied flow-type -> pctype-bitmap entries and rebuild
 * the derived flow_types_mask / pctypes_mask. An "exclusive" flag
 * (parameter elided in this excerpt) presumably guards the table clear —
 * TODO(review): confirm against the full source.
 */
2415 rte_pmd_i40e_flow_type_mapping_update(
2417 struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2421 struct rte_eth_dev *dev;
2422 struct i40e_adapter *ad;
2425 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2427 dev = &rte_eth_devices[port];
2429 if (!is_i40e_supported(dev))
2432 if (count > I40E_FLOW_TYPE_MAX)
/* Validate every entry before mutating driver state. */
2435 for (i = 0; i < count; i++)
2436 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2437 mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2438 (mapping_items[i].pctype &
2439 (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2442 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Clear existing mapping (conditional guard elided). */
2445 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2446 ad->pctypes_tbl[i] = 0ULL;
2447 ad->flow_types_mask = 0ULL;
/* Apply entries; a zero pctype bitmap disables that flow type. */
2450 for (i = 0; i < count; i++) {
2451 ad->pctypes_tbl[mapping_items[i].flow_type] =
2452 mapping_items[i].pctype;
2453 if (mapping_items[i].pctype)
2454 ad->flow_types_mask |=
2455 (1ULL << mapping_items[i].flow_type);
2457 ad->flow_types_mask &=
2458 ~(1ULL << mapping_items[i].flow_type);
/* Recompute the union of all enabled pctypes. */
2461 for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2462 ad->pctypes_mask |= ad->pctypes_tbl[i];
/*
 * Linear-search the PF's VF array for the VF whose default MAC address
 * equals @vf_mac; returns the VF id (return statements elided in this
 * excerpt; presumably a negative errno when not found).
 */
2468 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2470 struct rte_eth_dev *dev;
2471 struct ether_addr *mac;
2474 struct i40e_pf_vf *vf;
2477 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2478 dev = &rte_eth_devices[port];
2480 if (!is_i40e_supported(dev))
2483 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2484 vf_num = pf->vf_num;
2486 for (vf_id = 0; vf_id < vf_num; vf_id++) {
2487 vf = &pf->vfs[vf_id];
2488 mac = &vf->mac_addr;
2490 if (is_same_ether_addr(mac, vf_mac))
/*
 * Push the configured queue regions into the main VSI's TC/queue mapping
 * via an AdminQ "update VSI params" command, then mirror the accepted
 * mapping back into the local VSI info.
 *
 * Each region becomes one TC entry: start queue offset plus log2 of the
 * (power-of-two) region queue count. Returns I40E_SUCCESS or an error
 * (error returns elided in this excerpt).
 */
2498 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2502 struct i40e_vsi *vsi = pf->main_vsi;
2503 uint16_t queue_offset, bsf, tc_index;
2504 struct i40e_vsi_context ctxt;
2505 struct i40e_aqc_vsi_properties_data *vsi_info;
2506 struct i40e_queue_regions *region_info =
2508 int32_t ret = -EINVAL;
2510 if (!region_info->queue_region_number) {
2511 PMD_INIT_LOG(ERR, "there is no that region id been set before");
2515 memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2517 /* Update Queue Pairs Mapping for currently enabled UPs */
2518 ctxt.seid = vsi->seid;
2519 ctxt.pf_num = hw->pf_id;
2521 ctxt.uplink_seid = vsi->uplink_seid;
2522 ctxt.info = vsi->info;
2523 vsi_info = &ctxt.info;
/* Start from a clean TC/queue mapping before filling in regions. */
2525 memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2526 memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2528 /* Configure queue region and queue mapping parameters,
2529 * for enabled queue region, allocate queues to this region.
2532 for (i = 0; i < region_info->queue_region_number; i++) {
2533 tc_index = region_info->region[i].region_id;
/* bsf = log2(queue_num); queue_num was validated as a power of two. */
2534 bsf = rte_bsf32(region_info->region[i].queue_num);
2535 queue_offset = region_info->region[i].queue_start_index;
2536 vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2537 (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2538 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2541 /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2542 vsi_info->mapping_flags |=
2543 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2544 vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2545 vsi_info->valid_sections |=
2546 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2548 /* Update the VSI after updating the VSI queue-mapping information */
2549 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2551 PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
2552 hw->aq.asq_last_status);
2555 /* update the local VSI info with updated queue map */
2556 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2557 sizeof(vsi->info.tc_mapping));
2558 rte_memcpy(&vsi->info.queue_mapping,
2559 &ctxt.info.queue_mapping,
2560 sizeof(vsi->info.queue_mapping));
2561 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2562 vsi->info.valid_sections = 0;
/*
 * Record a new RSS queue region in the PF's software state (committed to
 * hardware later by the flush operation).
 *
 * Validates that the region size is a power of two <= 64, the region id
 * is in range, the queue span fits the main VSI, and the id was not set
 * before. (Error-return statements are elided in this excerpt.)
 */
2569 i40e_queue_region_set_region(struct i40e_pf *pf,
2570 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2573 struct i40e_vsi *main_vsi = pf->main_vsi;
2574 struct i40e_queue_regions *info = &pf->queue_region;
2575 int32_t ret = -EINVAL;
2577 if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2578 conf_ptr->queue_num <= 64)) {
2579 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2580 "total number of queues do not exceed the VSI allocation");
2584 if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2585 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2589 if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2590 > main_vsi->nb_used_qps) {
2591 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
/* Find whether this region id is already recorded. */
2595 for (i = 0; i < info->queue_region_number; i++)
2596 if (conf_ptr->region_id == info->region[i].region_id)
/* Not found and table not full: append a new region entry. */
2599 if (i == info->queue_region_number &&
2600 i <= I40E_REGION_MAX_INDEX) {
2601 info->region[i].region_id = conf_ptr->region_id;
2602 info->region[i].queue_num = conf_ptr->queue_num;
2603 info->region[i].queue_start_index =
2604 conf_ptr->queue_start_index;
2605 info->queue_region_number++;
2607 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
/*
 * Attach a hardware flow type (pctype) to an already-configured queue
 * region in the PF's software state.
 *
 * Rejects out-of-range region ids / flow types, regions that were never
 * set, and flow types already attached to any region. (Error returns and
 * the region_index assignment are elided in this excerpt.)
 */
2615 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2616 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2618 int32_t ret = -EINVAL;
2619 struct i40e_queue_regions *info = &pf->queue_region;
2621 uint16_t region_index, flowtype_index;
2623 /* For the pctype or hardware flowtype of packet,
2624 * the specific index for each type has been defined
2625 * in file i40e_type.h as enum i40e_filter_pctype.
2628 if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2629 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2633 if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2634 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
/* Locate the target region; it must have been set before. */
2639 for (i = 0; i < info->queue_region_number; i++)
2640 if (rss_region_conf->region_id == info->region[i].region_id)
2643 if (i == info->queue_region_number) {
2644 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* A flow type may belong to at most one region. */
2650 for (i = 0; i < info->queue_region_number; i++) {
2651 for (j = 0; j < info->region[i].flowtype_num; j++) {
2652 if (rss_region_conf->hw_flowtype ==
2653 info->region[i].hw_flowtype[j]) {
2654 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
/* Append the flow type to the region found above. */
2660 flowtype_index = info->region[region_index].flowtype_num;
2661 info->region[region_index].hw_flowtype[flowtype_index] =
2662 rss_region_conf->hw_flowtype;
2663 info->region[region_index].flowtype_num++;
/*
 * Program the PFQF_HREGION registers from the software queue-region
 * state: for each (region, flow type) pair, write the region id and the
 * override-enable bit into the register slot for that flow type.
 *
 * Each PFQF_HREGION register packs 8 flow types (hw_flowtype >> 3 picks
 * the register, hw_flowtype & 0x7 picks the slot), hence the 8-way
 * if/else ladder below.
 */
2669 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2672 uint8_t hw_flowtype;
2673 uint32_t pfqf_hregion;
2674 uint16_t i, j, index;
2675 struct i40e_queue_regions *info = &pf->queue_region;
2677 /* For the pctype or hardware flowtype of packet,
2678 * the specific index for each type has been defined
2679 * in file i40e_type.h as enum i40e_filter_pctype.
2682 for (i = 0; i < info->queue_region_number; i++) {
2683 for (j = 0; j < info->region[i].flowtype_num; j++) {
2684 hw_flowtype = info->region[i].hw_flowtype[j];
2685 index = hw_flowtype >> 3;
/* Read-modify-write: preserve the other 7 slots of this register. */
2687 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2689 if ((hw_flowtype & 0x7) == 0) {
2690 pfqf_hregion |= info->region[i].region_id <<
2691 I40E_PFQF_HREGION_REGION_0_SHIFT;
2692 pfqf_hregion |= 1 <<
2693 I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2694 } else if ((hw_flowtype & 0x7) == 1) {
2695 pfqf_hregion |= info->region[i].region_id <<
2696 I40E_PFQF_HREGION_REGION_1_SHIFT;
2697 pfqf_hregion |= 1 <<
2698 I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2699 } else if ((hw_flowtype & 0x7) == 2) {
2700 pfqf_hregion |= info->region[i].region_id <<
2701 I40E_PFQF_HREGION_REGION_2_SHIFT;
2702 pfqf_hregion |= 1 <<
2703 I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2704 } else if ((hw_flowtype & 0x7) == 3) {
2705 pfqf_hregion |= info->region[i].region_id <<
2706 I40E_PFQF_HREGION_REGION_3_SHIFT;
2707 pfqf_hregion |= 1 <<
2708 I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2709 } else if ((hw_flowtype & 0x7) == 4) {
2710 pfqf_hregion |= info->region[i].region_id <<
2711 I40E_PFQF_HREGION_REGION_4_SHIFT;
2712 pfqf_hregion |= 1 <<
2713 I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2714 } else if ((hw_flowtype & 0x7) == 5) {
2715 pfqf_hregion |= info->region[i].region_id <<
2716 I40E_PFQF_HREGION_REGION_5_SHIFT;
2717 pfqf_hregion |= 1 <<
2718 I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2719 } else if ((hw_flowtype & 0x7) == 6) {
2720 pfqf_hregion |= info->region[i].region_id <<
2721 I40E_PFQF_HREGION_REGION_6_SHIFT;
2722 pfqf_hregion |= 1 <<
2723 I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2725 pfqf_hregion |= info->region[i].region_id <<
2726 I40E_PFQF_HREGION_REGION_7_SHIFT;
2727 pfqf_hregion |= 1 <<
2728 I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2731 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
/*
 * Attach a DCB user priority to an already-configured queue region in
 * the PF's software state.
 *
 * Rejects out-of-range priorities / region ids, regions never set, and
 * priorities already attached to any region. (Error returns and the
 * region_index assignment are elided in this excerpt.)
 */
2738 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2739 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2741 struct i40e_queue_regions *info = &pf->queue_region;
2742 int32_t ret = -EINVAL;
2743 uint16_t i, j, region_index;
2745 if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2746 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2750 if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2751 PMD_DRV_LOG(ERR, "the region_id max index is 7");
/* Locate the target region; it must have been set before. */
2755 for (i = 0; i < info->queue_region_number; i++)
2756 if (rss_region_conf->region_id == info->region[i].region_id)
2759 if (i == info->queue_region_number) {
2760 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* A user priority may belong to at most one region. */
2767 for (i = 0; i < info->queue_region_number; i++) {
2768 for (j = 0; j < info->region[i].user_priority_num; j++) {
2769 if (info->region[i].user_priority[j] ==
2770 rss_region_conf->user_priority) {
2771 PMD_DRV_LOG(ERR, "that user priority has been set before");
/* Append the priority to the region found above. */
2777 j = info->region[region_index].user_priority_num;
2778 info->region[region_index].user_priority[j] =
2779 rss_region_conf->user_priority;
2780 info->region[region_index].user_priority_num++;
/*
 * Build a DCB configuration from the queue-region state (one TC per
 * region, bandwidth split evenly) and commit it to firmware.
 *
 * Returns I40E_SUCCESS on success; logs and returns an error otherwise
 * (some returns elided in this excerpt).
 */
2786 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2789 struct i40e_dcbx_config dcb_cfg_local;
2790 struct i40e_dcbx_config *dcb_cfg;
2791 struct i40e_queue_regions *info = &pf->queue_region;
2792 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2793 int32_t ret = -EINVAL;
2794 uint16_t i, j, prio_index, region_index;
2795 uint8_t tc_map, tc_bw, bw_lf;
2797 if (!info->queue_region_number) {
2798 PMD_DRV_LOG(ERR, "No queue region been set before");
2802 dcb_cfg = &dcb_cfg_local;
2803 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2805 /* assume each tc has the same bw */
2806 tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2807 for (i = 0; i < info->queue_region_number; i++)
2808 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2809 /* to ensure the sum of tcbw is equal to 100 */
/* Distribute the integer-division remainder one percent at a time. */
2810 bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
2811 for (i = 0; i < bw_lf; i++)
2812 dcb_cfg->etscfg.tcbwtable[i]++;
2814 /* assume each tc has the same Transmission Selection Algorithm */
2815 for (i = 0; i < info->queue_region_number; i++)
2816 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
/* Map each region's user priorities onto its TC (= region id). */
2818 for (i = 0; i < info->queue_region_number; i++) {
2819 for (j = 0; j < info->region[i].user_priority_num; j++) {
2820 prio_index = info->region[i].user_priority[j];
2821 region_index = info->region[i].region_id;
2822 dcb_cfg->etscfg.prioritytable[prio_index] =
2827 /* FW needs one App to configure HW */
2828 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2829 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2830 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2831 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* One bit per configured region/TC. */
2833 tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2835 dcb_cfg->pfc.willing = 0;
2836 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2837 dcb_cfg->pfc.pfcenable = tc_map;
2839 /* Copy the new config to the current config */
2840 *old_cfg = *dcb_cfg;
2841 old_cfg->etsrec = old_cfg->etscfg;
2842 ret = i40e_set_dcb_config(hw);
2845 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2846 i40e_stat_str(hw, ret),
2847 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Commit ("on") or tear down ("off") the accumulated queue-region
 * configuration.
 *
 * on != 0: program flow-type registers, update the VSI queue mapping and
 * apply the DCB configuration. on == 0: collapse to a single region
 * covering all used queues, restore default DCB, and reset the software
 * state. (Branch structure partially elided in this excerpt.)
 */
2855 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2856 struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2858 int32_t ret = -EINVAL;
2859 struct i40e_queue_regions *info = &pf->queue_region;
2860 struct i40e_vsi *main_vsi = pf->main_vsi;
/* --- flush-on path: commit software state to hardware --- */
2863 i40e_queue_region_pf_flowtype_conf(hw, pf);
2865 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2866 if (ret != I40E_SUCCESS) {
2867 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2871 ret = i40e_queue_region_dcb_configure(hw, pf);
2872 if (ret != I40E_SUCCESS) {
2873 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
/* --- flush-off path: restore defaults --- */
2880 if (info->queue_region_number) {
/* Single region spanning all used queues undoes the TC split. */
2881 info->queue_region_number = 1;
2882 info->region[0].queue_num = main_vsi->nb_used_qps;
2883 info->region[0].queue_start_index = 0;
2885 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2886 if (ret != I40E_SUCCESS)
2887 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2889 ret = i40e_dcb_init_configure(dev, TRUE);
2890 if (ret != I40E_SUCCESS) {
2891 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2892 pf->flags &= ~I40E_FLAG_DCB;
/* Clear all recorded regions back to the pristine state. */
2895 i40e_init_queue_region_conf(dev);
2901 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2903 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2906 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2907 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2916 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2917 struct i40e_queue_regions *regions_ptr)
2919 struct i40e_queue_regions *info = &pf->queue_region;
2921 rte_memcpy(regions_ptr, info,
2922 sizeof(struct i40e_queue_regions));
2927 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2928 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2930 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2931 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2932 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2935 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2937 if (!is_i40e_supported(dev))
2940 if (!(!i40e_queue_region_pf_check_rss(pf)))
2943 /* This queue region feature only support pf by now. It should
2944 * be called after dev_start, and will be clear after dev_stop.
2945 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2946 * is just an enable function which server for other configuration,
2947 * it is for all configuration about queue region from up layer,
2948 * at first will only keep in DPDK softwarestored in driver,
2949 * only after "FLUSH_ON", it commit all configuration to HW.
2950 * Because PMD had to set hardware configuration at a time, so
2951 * it will record all up layer command at first.
2952 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2953 * just clean all configuration about queue region just now,
2954 * and restore all to DPDK i40e driver default
2955 * config when start up.
2959 case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2960 ret = i40e_queue_region_set_region(pf,
2961 (struct rte_pmd_i40e_queue_region_conf *)arg);
2963 case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2964 ret = i40e_queue_region_set_flowtype(pf,
2965 (struct rte_pmd_i40e_queue_region_conf *)arg);
2967 case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2968 ret = i40e_queue_region_set_user_priority(pf,
2969 (struct rte_pmd_i40e_queue_region_conf *)arg);
2971 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2972 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2974 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2975 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2977 case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2978 ret = i40e_queue_region_get_all_info(pf,
2979 (struct i40e_queue_regions *)arg);
2982 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2987 I40E_WRITE_FLUSH(hw);
2992 int rte_pmd_i40e_flow_add_del_packet_template(
2994 const struct rte_pmd_i40e_pkt_template_conf *conf,
2997 struct rte_eth_dev *dev = &rte_eth_devices[port];
2998 struct i40e_fdir_filter_conf filter_conf;
3000 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3002 if (!is_i40e_supported(dev))
3005 memset(&filter_conf, 0, sizeof(filter_conf));
3006 filter_conf.soft_id = conf->soft_id;
3007 filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3008 filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3009 filter_conf.input.flow.raw_flow.length = conf->input.length;
3010 filter_conf.input.flow_ext.pkt_template = true;
3012 filter_conf.action.rx_queue = conf->action.rx_queue;
3013 filter_conf.action.behavior =
3014 (enum i40e_fdir_behavior)conf->action.behavior;
3015 filter_conf.action.report_status =
3016 (enum i40e_fdir_status)conf->action.report_status;
3017 filter_conf.action.flex_off = conf->action.flex_off;
3019 return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);