4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "base/i40e_dcb.h"
39 #include "i40e_ethdev.h"
41 #include "i40e_rxtx.h"
42 #include "rte_pmd_i40e.h"
45 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
47 struct rte_eth_dev *dev;
50 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52 dev = &rte_eth_devices[port];
54 if (!is_i40e_supported(dev))
57 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
59 if (vf >= pf->vf_num || !pf->vfs) {
60 PMD_DRV_LOG(ERR, "Invalid argument.");
64 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
70 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
72 struct rte_eth_dev *dev;
76 struct i40e_vsi_context ctxt;
79 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
81 dev = &rte_eth_devices[port];
83 if (!is_i40e_supported(dev))
86 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
88 if (vf_id >= pf->vf_num || !pf->vfs) {
89 PMD_DRV_LOG(ERR, "Invalid argument.");
93 vsi = pf->vfs[vf_id].vsi;
95 PMD_DRV_LOG(ERR, "Invalid VSI.");
99 /* Check if it has been already on or off */
100 if (vsi->info.valid_sections &
101 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
103 if ((vsi->info.sec_flags &
104 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
105 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
106 return 0; /* already on */
108 if ((vsi->info.sec_flags &
109 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
110 return 0; /* already off */
114 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
116 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
118 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
120 memset(&ctxt, 0, sizeof(ctxt));
121 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
122 ctxt.seid = vsi->seid;
124 hw = I40E_VSI_TO_HW(vsi);
125 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
126 if (ret != I40E_SUCCESS) {
128 PMD_DRV_LOG(ERR, "Failed to update VSI params");
135 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
139 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
140 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
143 for (j = 0; j < I40E_VFTA_SIZE; j++) {
147 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
148 if (!(vsi->vfta[j] & (1 << k)))
151 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
155 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
157 ret = i40e_aq_add_vlan(hw, vsi->seid,
158 &vlan_data, 1, NULL);
160 ret = i40e_aq_remove_vlan(hw, vsi->seid,
161 &vlan_data, 1, NULL);
162 if (ret != I40E_SUCCESS) {
164 "Failed to add/rm vlan filter");
174 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
176 struct rte_eth_dev *dev;
178 struct i40e_vsi *vsi;
180 struct i40e_vsi_context ctxt;
183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
185 dev = &rte_eth_devices[port];
187 if (!is_i40e_supported(dev))
190 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
192 if (vf_id >= pf->vf_num || !pf->vfs) {
193 PMD_DRV_LOG(ERR, "Invalid argument.");
197 vsi = pf->vfs[vf_id].vsi;
199 PMD_DRV_LOG(ERR, "Invalid VSI.");
203 /* Check if it has been already on or off */
204 if (vsi->vlan_anti_spoof_on == on)
205 return 0; /* already on or off */
207 vsi->vlan_anti_spoof_on = on;
208 if (!vsi->vlan_filter_on) {
209 ret = i40e_add_rm_all_vlan_filter(vsi, on);
211 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
216 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
218 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
220 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
222 memset(&ctxt, 0, sizeof(ctxt));
223 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
224 ctxt.seid = vsi->seid;
226 hw = I40E_VSI_TO_HW(vsi);
227 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
228 if (ret != I40E_SUCCESS) {
230 PMD_DRV_LOG(ERR, "Failed to update VSI params");
237 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
239 struct i40e_mac_filter *f;
240 struct i40e_macvlan_filter *mv_f;
242 enum rte_mac_filter_type filter_type;
243 int ret = I40E_SUCCESS;
246 /* remove all the MACs */
247 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
248 vlan_num = vsi->vlan_num;
249 filter_type = f->mac_info.filter_type;
250 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
251 filter_type == RTE_MACVLAN_HASH_MATCH) {
253 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
254 return I40E_ERR_PARAM;
256 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
257 filter_type == RTE_MAC_HASH_MATCH)
260 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
262 PMD_DRV_LOG(ERR, "failed to allocate memory");
263 return I40E_ERR_NO_MEMORY;
266 for (i = 0; i < vlan_num; i++) {
267 mv_f[i].filter_type = filter_type;
268 rte_memcpy(&mv_f[i].macaddr,
269 &f->mac_info.mac_addr,
272 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
273 filter_type == RTE_MACVLAN_HASH_MATCH) {
274 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
275 &f->mac_info.mac_addr);
276 if (ret != I40E_SUCCESS) {
282 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
283 if (ret != I40E_SUCCESS) {
296 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
298 struct i40e_mac_filter *f;
299 struct i40e_macvlan_filter *mv_f;
301 int ret = I40E_SUCCESS;
304 /* restore all the MACs */
305 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
306 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
307 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
309 * If vlan_num is 0, that's the first time to add mac,
310 * set mask for vlan_id 0.
312 if (vsi->vlan_num == 0) {
313 i40e_set_vlan_filter(vsi, 0, 1);
316 vlan_num = vsi->vlan_num;
317 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
318 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
321 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
323 PMD_DRV_LOG(ERR, "failed to allocate memory");
324 return I40E_ERR_NO_MEMORY;
327 for (i = 0; i < vlan_num; i++) {
328 mv_f[i].filter_type = f->mac_info.filter_type;
329 rte_memcpy(&mv_f[i].macaddr,
330 &f->mac_info.mac_addr,
334 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
335 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
336 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
337 &f->mac_info.mac_addr);
338 if (ret != I40E_SUCCESS) {
344 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
345 if (ret != I40E_SUCCESS) {
358 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
360 struct i40e_vsi_context ctxt;
367 hw = I40E_VSI_TO_HW(vsi);
369 /* Use the FW API if FW >= v5.0 */
370 if (hw->aq.fw_maj_ver < 5) {
371 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
375 /* Check if it has been already on or off */
376 if (vsi->info.valid_sections &
377 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
379 if ((vsi->info.switch_id &
380 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
381 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
382 return 0; /* already on */
384 if ((vsi->info.switch_id &
385 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
386 return 0; /* already off */
390 /* remove all the MAC and VLAN first */
391 ret = i40e_vsi_rm_mac_filter(vsi);
393 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
396 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
399 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
404 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
406 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
408 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
410 memset(&ctxt, 0, sizeof(ctxt));
411 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
412 ctxt.seid = vsi->seid;
414 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
415 if (ret != I40E_SUCCESS) {
416 PMD_DRV_LOG(ERR, "Failed to update VSI params");
420 /* add all the MAC and VLAN back */
421 ret = i40e_vsi_restore_mac_filter(vsi);
424 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
425 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
434 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
436 struct rte_eth_dev *dev;
438 struct i40e_pf_vf *vf;
439 struct i40e_vsi *vsi;
443 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
445 dev = &rte_eth_devices[port];
447 if (!is_i40e_supported(dev))
450 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
452 /* setup PF TX loopback */
454 ret = i40e_vsi_set_tx_loopback(vsi, on);
458 /* setup TX loopback for all the VFs */
460 /* if no VF, do nothing. */
464 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
465 vf = &pf->vfs[vf_id];
468 ret = i40e_vsi_set_tx_loopback(vsi, on);
477 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
479 struct rte_eth_dev *dev;
481 struct i40e_vsi *vsi;
485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
487 dev = &rte_eth_devices[port];
489 if (!is_i40e_supported(dev))
492 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
494 if (vf_id >= pf->vf_num || !pf->vfs) {
495 PMD_DRV_LOG(ERR, "Invalid argument.");
499 vsi = pf->vfs[vf_id].vsi;
501 PMD_DRV_LOG(ERR, "Invalid VSI.");
505 hw = I40E_VSI_TO_HW(vsi);
507 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
509 if (ret != I40E_SUCCESS) {
511 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
518 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
520 struct rte_eth_dev *dev;
522 struct i40e_vsi *vsi;
526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
528 dev = &rte_eth_devices[port];
530 if (!is_i40e_supported(dev))
533 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
535 if (vf_id >= pf->vf_num || !pf->vfs) {
536 PMD_DRV_LOG(ERR, "Invalid argument.");
540 vsi = pf->vfs[vf_id].vsi;
542 PMD_DRV_LOG(ERR, "Invalid VSI.");
546 hw = I40E_VSI_TO_HW(vsi);
548 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
550 if (ret != I40E_SUCCESS) {
552 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
559 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
560 struct ether_addr *mac_addr)
562 struct i40e_mac_filter *f;
563 struct rte_eth_dev *dev;
564 struct i40e_pf_vf *vf;
565 struct i40e_vsi *vsi;
569 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
574 dev = &rte_eth_devices[port];
576 if (!is_i40e_supported(dev))
579 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
581 if (vf_id >= pf->vf_num || !pf->vfs)
584 vf = &pf->vfs[vf_id];
587 PMD_DRV_LOG(ERR, "Invalid VSI.");
591 ether_addr_copy(mac_addr, &vf->mac_addr);
593 /* Remove all existing mac */
594 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
595 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
597 PMD_DRV_LOG(WARNING, "Delete MAC failed");
602 /* Set vlan strip on/off for specific VF from host */
604 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
606 struct rte_eth_dev *dev;
608 struct i40e_vsi *vsi;
611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
613 dev = &rte_eth_devices[port];
615 if (!is_i40e_supported(dev))
618 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
620 if (vf_id >= pf->vf_num || !pf->vfs) {
621 PMD_DRV_LOG(ERR, "Invalid argument.");
625 vsi = pf->vfs[vf_id].vsi;
630 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
631 if (ret != I40E_SUCCESS) {
633 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
639 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
642 struct rte_eth_dev *dev;
645 struct i40e_vsi *vsi;
646 struct i40e_vsi_context ctxt;
649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
651 if (vlan_id > ETHER_MAX_VLAN_ID) {
652 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
656 dev = &rte_eth_devices[port];
658 if (!is_i40e_supported(dev))
661 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
662 hw = I40E_PF_TO_HW(pf);
665 * return -ENODEV if SRIOV not enabled, VF number not configured
666 * or no queue assigned.
668 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
672 if (vf_id >= pf->vf_num || !pf->vfs) {
673 PMD_DRV_LOG(ERR, "Invalid VF ID.");
677 vsi = pf->vfs[vf_id].vsi;
679 PMD_DRV_LOG(ERR, "Invalid VSI.");
683 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
684 vsi->info.pvid = vlan_id;
686 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
688 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
690 memset(&ctxt, 0, sizeof(ctxt));
691 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
692 ctxt.seid = vsi->seid;
694 hw = I40E_VSI_TO_HW(vsi);
695 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
696 if (ret != I40E_SUCCESS) {
698 PMD_DRV_LOG(ERR, "Failed to update VSI params");
704 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
707 struct rte_eth_dev *dev;
709 struct i40e_vsi *vsi;
711 struct i40e_mac_filter_info filter;
712 struct ether_addr broadcast = {
713 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
719 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
723 dev = &rte_eth_devices[port];
725 if (!is_i40e_supported(dev))
728 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
729 hw = I40E_PF_TO_HW(pf);
731 if (vf_id >= pf->vf_num || !pf->vfs) {
732 PMD_DRV_LOG(ERR, "Invalid VF ID.");
737 * return -ENODEV if SRIOV not enabled, VF number not configured
738 * or no queue assigned.
740 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
741 pf->vf_nb_qps == 0) {
742 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
746 vsi = pf->vfs[vf_id].vsi;
748 PMD_DRV_LOG(ERR, "Invalid VSI.");
753 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
754 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
755 ret = i40e_vsi_add_mac(vsi, &filter);
757 ret = i40e_vsi_delete_mac(vsi, &broadcast);
760 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
762 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
770 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
772 struct rte_eth_dev *dev;
775 struct i40e_vsi *vsi;
776 struct i40e_vsi_context ctxt;
779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
782 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
786 dev = &rte_eth_devices[port];
788 if (!is_i40e_supported(dev))
791 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
792 hw = I40E_PF_TO_HW(pf);
795 * return -ENODEV if SRIOV not enabled, VF number not configured
796 * or no queue assigned.
798 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
799 pf->vf_nb_qps == 0) {
800 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
804 if (vf_id >= pf->vf_num || !pf->vfs) {
805 PMD_DRV_LOG(ERR, "Invalid VF ID.");
809 vsi = pf->vfs[vf_id].vsi;
811 PMD_DRV_LOG(ERR, "Invalid VSI.");
815 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
817 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
818 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
820 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
821 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
824 memset(&ctxt, 0, sizeof(ctxt));
825 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
826 ctxt.seid = vsi->seid;
828 hw = I40E_VSI_TO_HW(vsi);
829 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
830 if (ret != I40E_SUCCESS) {
832 PMD_DRV_LOG(ERR, "Failed to update VSI params");
839 i40e_vlan_filter_count(struct i40e_vsi *vsi)
845 for (j = 0; j < I40E_VFTA_SIZE; j++) {
849 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
850 if (!(vsi->vfta[j] & (1 << k)))
853 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
864 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
865 uint64_t vf_mask, uint8_t on)
867 struct rte_eth_dev *dev;
870 struct i40e_vsi *vsi;
872 int ret = I40E_SUCCESS;
874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
876 dev = &rte_eth_devices[port];
878 if (!is_i40e_supported(dev))
881 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
882 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
887 PMD_DRV_LOG(ERR, "No VF.");
892 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
896 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
897 hw = I40E_PF_TO_HW(pf);
900 * return -ENODEV if SRIOV not enabled, VF number not configured
901 * or no queue assigned.
903 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
904 pf->vf_nb_qps == 0) {
905 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
909 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
910 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
911 vsi = pf->vfs[vf_idx].vsi;
913 if (!vsi->vlan_filter_on) {
914 vsi->vlan_filter_on = true;
915 i40e_aq_set_vsi_vlan_promisc(hw,
919 if (!vsi->vlan_anti_spoof_on)
920 i40e_add_rm_all_vlan_filter(
923 ret = i40e_vsi_add_vlan(vsi, vlan_id);
925 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
927 if (!i40e_vlan_filter_count(vsi)) {
928 vsi->vlan_filter_on = false;
929 i40e_aq_set_vsi_vlan_promisc(hw,
938 if (ret != I40E_SUCCESS) {
940 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
947 rte_pmd_i40e_get_vf_stats(uint16_t port,
949 struct rte_eth_stats *stats)
951 struct rte_eth_dev *dev;
953 struct i40e_vsi *vsi;
955 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
957 dev = &rte_eth_devices[port];
959 if (!is_i40e_supported(dev))
962 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
964 if (vf_id >= pf->vf_num || !pf->vfs) {
965 PMD_DRV_LOG(ERR, "Invalid VF ID.");
969 vsi = pf->vfs[vf_id].vsi;
971 PMD_DRV_LOG(ERR, "Invalid VSI.");
975 i40e_update_vsi_stats(vsi);
977 stats->ipackets = vsi->eth_stats.rx_unicast +
978 vsi->eth_stats.rx_multicast +
979 vsi->eth_stats.rx_broadcast;
980 stats->opackets = vsi->eth_stats.tx_unicast +
981 vsi->eth_stats.tx_multicast +
982 vsi->eth_stats.tx_broadcast;
983 stats->ibytes = vsi->eth_stats.rx_bytes;
984 stats->obytes = vsi->eth_stats.tx_bytes;
985 stats->ierrors = vsi->eth_stats.rx_discards;
986 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
992 rte_pmd_i40e_reset_vf_stats(uint16_t port,
995 struct rte_eth_dev *dev;
997 struct i40e_vsi *vsi;
999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1001 dev = &rte_eth_devices[port];
1003 if (!is_i40e_supported(dev))
1006 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1008 if (vf_id >= pf->vf_num || !pf->vfs) {
1009 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1013 vsi = pf->vfs[vf_id].vsi;
1015 PMD_DRV_LOG(ERR, "Invalid VSI.");
1019 vsi->offset_loaded = false;
1020 i40e_update_vsi_stats(vsi);
1026 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1028 struct rte_eth_dev *dev;
1030 struct i40e_vsi *vsi;
1035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1037 dev = &rte_eth_devices[port];
1039 if (!is_i40e_supported(dev))
1042 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1044 if (vf_id >= pf->vf_num || !pf->vfs) {
1045 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1049 vsi = pf->vfs[vf_id].vsi;
1051 PMD_DRV_LOG(ERR, "Invalid VSI.");
1055 if (bw > I40E_QOS_BW_MAX) {
1056 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1061 if (bw % I40E_QOS_BW_GRANULARITY) {
1062 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1063 I40E_QOS_BW_GRANULARITY);
1067 bw /= I40E_QOS_BW_GRANULARITY;
1069 hw = I40E_VSI_TO_HW(vsi);
1072 if (bw == vsi->bw_info.bw_limit) {
1074 "No change for VF max bandwidth. Nothing to do.");
1079 * VF bandwidth limitation and TC bandwidth limitation cannot be
1080 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1082 * If bw is 0, means disable bandwidth limitation. Then no need to
1083 * check TC bandwidth limitation.
1086 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1087 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1088 vsi->bw_info.bw_ets_credits[i])
1091 if (i != I40E_MAX_TRAFFIC_CLASS) {
1093 "TC max bandwidth has been set on this VF,"
1094 " please disable it first.");
1099 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1102 "Failed to set VF %d bandwidth, err(%d).",
1107 /* Store the configuration. */
1108 vsi->bw_info.bw_limit = (uint16_t)bw;
1109 vsi->bw_info.bw_max = 0;
1115 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1116 uint8_t tc_num, uint8_t *bw_weight)
1118 struct rte_eth_dev *dev;
1120 struct i40e_vsi *vsi;
1122 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1126 bool b_change = false;
1128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1130 dev = &rte_eth_devices[port];
1132 if (!is_i40e_supported(dev))
1135 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1137 if (vf_id >= pf->vf_num || !pf->vfs) {
1138 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1142 vsi = pf->vfs[vf_id].vsi;
1144 PMD_DRV_LOG(ERR, "Invalid VSI.");
1148 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1149 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1150 I40E_MAX_TRAFFIC_CLASS);
1155 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1156 if (vsi->enabled_tc & BIT_ULL(i))
1159 if (sum != tc_num) {
1161 "Weight should be set for all %d enabled TCs.",
1167 for (i = 0; i < tc_num; i++) {
1168 if (!bw_weight[i]) {
1170 "The weight should be 1 at least.");
1173 sum += bw_weight[i];
1177 "The summary of the TC weight should be 100.");
1182 * Create the configuration for all the TCs.
1184 memset(&tc_bw, 0, sizeof(tc_bw));
1185 tc_bw.tc_valid_bits = vsi->enabled_tc;
1187 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1188 if (vsi->enabled_tc & BIT_ULL(i)) {
1190 vsi->bw_info.bw_ets_share_credits[i])
1193 tc_bw.tc_bw_credits[i] = bw_weight[j];
1201 "No change for TC allocated bandwidth."
1206 hw = I40E_VSI_TO_HW(vsi);
1208 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1211 "Failed to set VF %d TC bandwidth weight, err(%d).",
1216 /* Store the configuration. */
1218 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1219 if (vsi->enabled_tc & BIT_ULL(i)) {
1220 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
1229 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1230 uint8_t tc_no, uint32_t bw)
1232 struct rte_eth_dev *dev;
1234 struct i40e_vsi *vsi;
1236 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1240 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1242 dev = &rte_eth_devices[port];
1244 if (!is_i40e_supported(dev))
1247 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1249 if (vf_id >= pf->vf_num || !pf->vfs) {
1250 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1254 vsi = pf->vfs[vf_id].vsi;
1256 PMD_DRV_LOG(ERR, "Invalid VSI.");
1260 if (bw > I40E_QOS_BW_MAX) {
1261 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1266 if (bw % I40E_QOS_BW_GRANULARITY) {
1267 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1268 I40E_QOS_BW_GRANULARITY);
1272 bw /= I40E_QOS_BW_GRANULARITY;
1274 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1275 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1276 I40E_MAX_TRAFFIC_CLASS);
1280 hw = I40E_VSI_TO_HW(vsi);
1282 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1283 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1289 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1291 "No change for TC max bandwidth. Nothing to do.");
1296 * VF bandwidth limitation and TC bandwidth limitation cannot be
1297 * enabled in parallel, disable VF bandwidth limitation if it's
1299 * If bw is 0, means disable bandwidth limitation. Then no need to
1300 * care about VF bandwidth limitation configuration.
1302 if (bw && vsi->bw_info.bw_limit) {
1303 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1306 "Failed to disable VF(%d)"
1307 " bandwidth limitation, err(%d).",
1313 "VF max bandwidth is disabled according"
1314 " to TC max bandwidth setting.");
1318 * Get all the TCs' info to create a whole picture.
1319 * Because the incremental change isn't permitted.
1321 memset(&tc_bw, 0, sizeof(tc_bw));
1322 tc_bw.tc_valid_bits = vsi->enabled_tc;
1323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1324 if (vsi->enabled_tc & BIT_ULL(i)) {
1325 tc_bw.tc_bw_credits[i] =
1327 vsi->bw_info.bw_ets_credits[i]);
1330 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1332 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1335 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1340 /* Store the configuration. */
1341 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
1347 rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
1349 struct rte_eth_dev *dev;
1351 struct i40e_vsi *vsi;
1352 struct i40e_veb *veb;
1354 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1358 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1360 dev = &rte_eth_devices[port];
1362 if (!is_i40e_supported(dev))
1365 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1369 PMD_DRV_LOG(ERR, "Invalid VSI.");
1375 PMD_DRV_LOG(ERR, "Invalid VEB.");
1379 if ((tc_map & veb->enabled_tc) != tc_map) {
1381 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1386 if (tc_map == veb->strict_prio_tc) {
1387 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1391 hw = I40E_VSI_TO_HW(vsi);
1393 /* Disable DCBx if it's the first time to set strict priority. */
1394 if (!veb->strict_prio_tc) {
1395 ret = i40e_aq_stop_lldp(hw, true, NULL);
1398 "Failed to disable DCBx as it's already"
1402 "DCBx is disabled according to strict"
1403 " priority setting.");
1406 memset(&ets_data, 0, sizeof(ets_data));
1407 ets_data.tc_valid_bits = veb->enabled_tc;
1408 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1409 ets_data.tc_strict_priority_flags = tc_map;
1410 /* Get all TCs' bandwidth. */
1411 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1412 if (veb->enabled_tc & BIT_ULL(i)) {
1413 /* For rubust, if bandwidth is 0, use 1 instead. */
1414 if (veb->bw_info.bw_ets_share_credits[i])
1415 ets_data.tc_bw_share_credits[i] =
1416 veb->bw_info.bw_ets_share_credits[i];
1418 ets_data.tc_bw_share_credits[i] =
1419 I40E_QOS_BW_WEIGHT_MIN;
1423 if (!veb->strict_prio_tc)
1424 ret = i40e_aq_config_switch_comp_ets(
1425 hw, veb->uplink_seid,
1426 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1429 ret = i40e_aq_config_switch_comp_ets(
1430 hw, veb->uplink_seid,
1431 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1434 ret = i40e_aq_config_switch_comp_ets(
1435 hw, veb->uplink_seid,
1436 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1441 "Failed to set TCs' strict priority mode."
1446 veb->strict_prio_tc = tc_map;
1448 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1450 ret = i40e_aq_start_lldp(hw, NULL);
1453 "Failed to enable DCBx, err(%d).", ret);
1458 "DCBx is enabled again according to strict"
1459 " priority setting.");
1465 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1466 #define I40E_MAX_PROFILE_NUM 16
1469 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1470 uint32_t track_id, uint8_t *profile_info_sec,
1473 struct i40e_profile_section_header *sec = NULL;
1474 struct i40e_profile_info *pinfo;
1476 sec = (struct i40e_profile_section_header *)profile_info_sec;
1478 sec->data_end = sizeof(struct i40e_profile_section_header) +
1479 sizeof(struct i40e_profile_info);
1480 sec->section.type = SECTION_TYPE_INFO;
1481 sec->section.offset = sizeof(struct i40e_profile_section_header);
1482 sec->section.size = sizeof(struct i40e_profile_info);
1483 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1484 sec->section.offset);
1485 pinfo->track_id = track_id;
1486 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1487 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1489 pinfo->op = I40E_DDP_ADD_TRACKID;
1491 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1494 static enum i40e_status_code
1495 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1497 enum i40e_status_code status = I40E_SUCCESS;
1498 struct i40e_profile_section_header *sec;
1500 uint32_t offset = 0;
1503 sec = (struct i40e_profile_section_header *)profile_info_sec;
1504 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1505 sec->section.offset))->track_id;
1507 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1508 track_id, &offset, &info, NULL);
1510 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1511 "offset %d, info %d",
1517 /* Check if the profile info exists */
1519 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1521 struct rte_eth_dev *dev = &rte_eth_devices[port];
1522 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524 struct rte_pmd_i40e_profile_list *p_list;
1525 struct rte_pmd_i40e_profile_info *pinfo, *p;
1528 static const uint32_t group_mask = 0x00ff0000;
1530 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1531 sizeof(struct i40e_profile_section_header));
1532 if (pinfo->track_id == 0) {
1533 PMD_DRV_LOG(INFO, "Read-only profile.");
1536 buff = rte_zmalloc("pinfo_list",
1537 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1540 PMD_DRV_LOG(ERR, "failed to allocate memory");
1544 ret = i40e_aq_get_ddp_list(
1546 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1549 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1553 p_list = (struct rte_pmd_i40e_profile_list *)buff;
1554 for (i = 0; i < p_list->p_count; i++) {
1555 p = &p_list->p_info[i];
1556 if (pinfo->track_id == p->track_id) {
1557 PMD_DRV_LOG(INFO, "Profile exists.");
1562 for (i = 0; i < p_list->p_count; i++) {
1563 p = &p_list->p_info[i];
1564 if ((p->track_id & group_mask) == 0) {
1565 PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1570 for (i = 0; i < p_list->p_count; i++) {
1571 p = &p_list->p_info[i];
1572 if ((pinfo->track_id & group_mask) !=
1573 (p->track_id & group_mask)) {
1574 PMD_DRV_LOG(INFO, "Profile of different group exists.");
1585 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1587 enum rte_pmd_i40e_package_op op)
1589 struct rte_eth_dev *dev;
1591 struct i40e_package_header *pkg_hdr;
1592 struct i40e_generic_seg_header *profile_seg_hdr;
1593 struct i40e_generic_seg_header *metadata_seg_hdr;
1595 uint8_t *profile_info_sec;
1597 enum i40e_status_code status = I40E_SUCCESS;
1598 static const uint32_t type_mask = 0xff000000;
1600 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1601 op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1602 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1603 PMD_DRV_LOG(ERR, "Operation not supported.");
1607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1609 dev = &rte_eth_devices[port];
1611 if (!is_i40e_supported(dev))
1614 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1616 if (size < (sizeof(struct i40e_package_header) +
1617 sizeof(struct i40e_metadata_segment) +
1618 sizeof(uint32_t) * 2)) {
1619 PMD_DRV_LOG(ERR, "Buff is invalid.");
1623 pkg_hdr = (struct i40e_package_header *)buff;
1626 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
1630 if (pkg_hdr->segment_count < 2) {
1631 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1635 /* Find metadata segment */
1636 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1638 if (!metadata_seg_hdr) {
1639 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1642 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1643 if (track_id == I40E_DDP_TRACKID_INVALID) {
1644 PMD_DRV_LOG(ERR, "Invalid track_id");
1648 /* force read-only track_id for type 0 */
1649 if ((track_id & type_mask) == 0)
1652 /* Find profile segment */
1653 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1655 if (!profile_seg_hdr) {
1656 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
1660 profile_info_sec = rte_zmalloc(
1661 "i40e_profile_info",
1662 sizeof(struct i40e_profile_section_header) +
1663 sizeof(struct i40e_profile_info),
1665 if (!profile_info_sec) {
1666 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1670 /* Check if the profile already loaded */
1671 i40e_generate_profile_info_sec(
1672 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1673 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1674 track_id, profile_info_sec,
1675 op == RTE_PMD_I40E_PKG_OP_WR_ADD);
1676 is_exist = i40e_check_profile_info(port, profile_info_sec);
1678 PMD_DRV_LOG(ERR, "Failed to check profile.");
1679 rte_free(profile_info_sec);
1683 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1686 PMD_DRV_LOG(ERR, "Profile already exists.");
1687 else if (is_exist == 2)
1688 PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
1689 else if (is_exist == 3)
1690 PMD_DRV_LOG(ERR, "Profile of different group already exists");
1691 rte_free(profile_info_sec);
1694 } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1695 if (is_exist != 1) {
1696 PMD_DRV_LOG(ERR, "Profile does not exist.");
1697 rte_free(profile_info_sec);
1702 if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1703 status = i40e_rollback_profile(
1705 (struct i40e_profile_segment *)profile_seg_hdr,
1708 PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
1709 rte_free(profile_info_sec);
1713 status = i40e_write_profile(
1715 (struct i40e_profile_segment *)profile_seg_hdr,
1718 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1719 PMD_DRV_LOG(ERR, "Failed to write profile for add.");
1721 PMD_DRV_LOG(ERR, "Failed to write profile.");
1722 rte_free(profile_info_sec);
1727 if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
1728 /* Modify loaded profiles info list */
1729 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1731 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1732 PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
1734 PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
1738 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
1739 op == RTE_PMD_I40E_PKG_OP_WR_DEL)
1740 i40e_update_customized_info(dev, buff, size, op);
1742 rte_free(profile_info_sec);
1746 /* Get number of tvl records in the section */
1748 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1750 unsigned int i, nb_rec, nb_tlv = 0;
1751 struct i40e_profile_tlv_section_record *tlv;
1756 /* get number of records in the section */
1757 nb_rec = sec->section.size /
1758 sizeof(struct i40e_profile_tlv_section_record);
1759 for (i = 0; i < nb_rec; ) {
1760 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1767 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1768 uint8_t *info_buff, uint32_t info_size,
1769 enum rte_pmd_i40e_package_info type)
1772 struct i40e_package_header *pkg_hdr;
1773 struct i40e_generic_seg_header *i40e_seg_hdr;
1774 struct i40e_generic_seg_header *note_seg_hdr;
1775 struct i40e_generic_seg_header *metadata_seg_hdr;
1778 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1782 if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1783 sizeof(struct i40e_metadata_segment) +
1784 sizeof(uint32_t) * 2)) {
1785 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1789 pkg_hdr = (struct i40e_package_header *)pkg_buff;
1790 if (pkg_hdr->segment_count < 2) {
1791 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1795 /* Find metadata segment */
1796 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1799 /* Find global notes segment */
1800 note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1803 /* Find i40e profile segment */
1804 i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1806 /* get global header info */
1807 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1808 struct rte_pmd_i40e_profile_info *info =
1809 (struct rte_pmd_i40e_profile_info *)info_buff;
1811 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1812 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1816 if (!metadata_seg_hdr) {
1817 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1821 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1822 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1824 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1827 ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1828 I40E_DDP_NAME_SIZE);
1829 memcpy(&info->version,
1830 &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1831 sizeof(struct i40e_ddp_version));
1832 return I40E_SUCCESS;
1835 /* get global note size */
1836 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1837 if (info_size < sizeof(uint32_t)) {
1838 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1841 if (note_seg_hdr == NULL)
1844 ret_size = note_seg_hdr->size;
1845 *(uint32_t *)info_buff = ret_size;
1846 return I40E_SUCCESS;
1849 /* get global note */
1850 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1851 if (note_seg_hdr == NULL)
1853 if (info_size < note_seg_hdr->size) {
1854 PMD_DRV_LOG(ERR, "Information buffer size is too small");
1857 memcpy(info_buff, ¬e_seg_hdr[1], note_seg_hdr->size);
1858 return I40E_SUCCESS;
1861 /* get i40e segment header info */
1862 if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1863 struct rte_pmd_i40e_profile_info *info =
1864 (struct rte_pmd_i40e_profile_info *)info_buff;
1866 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1867 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1871 if (!metadata_seg_hdr) {
1872 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1876 if (!i40e_seg_hdr) {
1877 PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1881 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1882 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1884 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1887 ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1888 I40E_DDP_NAME_SIZE);
1889 memcpy(&info->version,
1890 &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1891 sizeof(struct i40e_ddp_version));
1892 return I40E_SUCCESS;
1895 /* get number of devices */
1896 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1897 if (info_size < sizeof(uint32_t)) {
1898 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1901 *(uint32_t *)info_buff =
1902 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1903 return I40E_SUCCESS;
1906 /* get list of devices */
1907 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1910 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1911 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1912 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1916 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1917 sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1918 return I40E_SUCCESS;
1921 /* get number of protocols */
1922 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1923 struct i40e_profile_section_header *proto;
1925 if (info_size < sizeof(uint32_t)) {
1926 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1929 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1930 (struct i40e_profile_segment *)i40e_seg_hdr);
1931 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1932 return I40E_SUCCESS;
1935 /* get list of protocols */
1936 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1937 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1938 struct rte_pmd_i40e_proto_info *pinfo;
1939 struct i40e_profile_section_header *proto;
1940 struct i40e_profile_tlv_section_record *tlv;
1942 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1943 nb_proto_info = info_size /
1944 sizeof(struct rte_pmd_i40e_proto_info);
1945 for (i = 0; i < nb_proto_info; i++) {
1946 pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1947 memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1949 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1950 (struct i40e_profile_segment *)i40e_seg_hdr);
1951 nb_tlv = i40e_get_tlv_section_size(proto);
1953 return I40E_SUCCESS;
1954 if (nb_proto_info < nb_tlv) {
1955 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1958 /* get number of records in the section */
1959 nb_rec = proto->section.size /
1960 sizeof(struct i40e_profile_tlv_section_record);
1961 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1962 for (i = j = 0; i < nb_rec; j++) {
1963 pinfo[j].proto_id = tlv->data[0];
1964 snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1965 (const char *)&tlv->data[1]);
1967 tlv = &tlv[tlv->len];
1969 return I40E_SUCCESS;
1972 /* get number of packet classification types */
1973 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1974 struct i40e_profile_section_header *pctype;
1976 if (info_size < sizeof(uint32_t)) {
1977 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1980 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1981 (struct i40e_profile_segment *)i40e_seg_hdr);
1982 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1983 return I40E_SUCCESS;
1986 /* get list of packet classification types */
1987 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1988 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1989 struct rte_pmd_i40e_ptype_info *pinfo;
1990 struct i40e_profile_section_header *pctype;
1991 struct i40e_profile_tlv_section_record *tlv;
1993 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1994 nb_proto_info = info_size /
1995 sizeof(struct rte_pmd_i40e_ptype_info);
1996 for (i = 0; i < nb_proto_info; i++)
1997 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1998 sizeof(struct rte_pmd_i40e_ptype_info));
1999 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2000 (struct i40e_profile_segment *)i40e_seg_hdr);
2001 nb_tlv = i40e_get_tlv_section_size(pctype);
2003 return I40E_SUCCESS;
2004 if (nb_proto_info < nb_tlv) {
2005 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2009 /* get number of records in the section */
2010 nb_rec = pctype->section.size /
2011 sizeof(struct i40e_profile_tlv_section_record);
2012 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2013 for (i = j = 0; i < nb_rec; j++) {
2014 memcpy(&pinfo[j], tlv->data,
2015 sizeof(struct rte_pmd_i40e_ptype_info));
2017 tlv = &tlv[tlv->len];
2019 return I40E_SUCCESS;
2022 /* get number of packet types */
2023 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2024 struct i40e_profile_section_header *ptype;
2026 if (info_size < sizeof(uint32_t)) {
2027 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2030 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2031 (struct i40e_profile_segment *)i40e_seg_hdr);
2032 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2033 return I40E_SUCCESS;
2036 /* get list of packet types */
2037 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2038 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2039 struct rte_pmd_i40e_ptype_info *pinfo;
2040 struct i40e_profile_section_header *ptype;
2041 struct i40e_profile_tlv_section_record *tlv;
2043 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2044 nb_proto_info = info_size /
2045 sizeof(struct rte_pmd_i40e_ptype_info);
2046 for (i = 0; i < nb_proto_info; i++)
2047 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2048 sizeof(struct rte_pmd_i40e_ptype_info));
2049 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2050 (struct i40e_profile_segment *)i40e_seg_hdr);
2051 nb_tlv = i40e_get_tlv_section_size(ptype);
2053 return I40E_SUCCESS;
2054 if (nb_proto_info < nb_tlv) {
2055 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2058 /* get number of records in the section */
2059 nb_rec = ptype->section.size /
2060 sizeof(struct i40e_profile_tlv_section_record);
2061 for (i = j = 0; i < nb_rec; j++) {
2062 tlv = (struct i40e_profile_tlv_section_record *)
2064 memcpy(&pinfo[j], tlv->data,
2065 sizeof(struct rte_pmd_i40e_ptype_info));
2068 return I40E_SUCCESS;
2071 PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2076 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2078 struct rte_eth_dev *dev;
2080 enum i40e_status_code status = I40E_SUCCESS;
2082 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2084 dev = &rte_eth_devices[port];
2086 if (!is_i40e_supported(dev))
2089 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2092 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094 status = i40e_aq_get_ddp_list(hw, (void *)buff,
2100 static int check_invalid_pkt_type(uint32_t pkt_type)
2102 uint32_t l2, l3, l4, tnl, il2, il3, il4;
2104 l2 = pkt_type & RTE_PTYPE_L2_MASK;
2105 l3 = pkt_type & RTE_PTYPE_L3_MASK;
2106 l4 = pkt_type & RTE_PTYPE_L4_MASK;
2107 tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2108 il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2109 il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2110 il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2113 l2 != RTE_PTYPE_L2_ETHER &&
2114 l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2115 l2 != RTE_PTYPE_L2_ETHER_ARP &&
2116 l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2117 l2 != RTE_PTYPE_L2_ETHER_NSH &&
2118 l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2119 l2 != RTE_PTYPE_L2_ETHER_QINQ)
2123 l3 != RTE_PTYPE_L3_IPV4 &&
2124 l3 != RTE_PTYPE_L3_IPV4_EXT &&
2125 l3 != RTE_PTYPE_L3_IPV6 &&
2126 l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2127 l3 != RTE_PTYPE_L3_IPV6_EXT &&
2128 l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2132 l4 != RTE_PTYPE_L4_TCP &&
2133 l4 != RTE_PTYPE_L4_UDP &&
2134 l4 != RTE_PTYPE_L4_FRAG &&
2135 l4 != RTE_PTYPE_L4_SCTP &&
2136 l4 != RTE_PTYPE_L4_ICMP &&
2137 l4 != RTE_PTYPE_L4_NONFRAG)
2141 tnl != RTE_PTYPE_TUNNEL_IP &&
2142 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2143 tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2144 tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2145 tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2146 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2147 tnl != RTE_PTYPE_TUNNEL_GTPC &&
2148 tnl != RTE_PTYPE_TUNNEL_GTPU)
2152 il2 != RTE_PTYPE_INNER_L2_ETHER &&
2153 il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2154 il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2158 il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2159 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2160 il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2161 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2162 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2163 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2167 il4 != RTE_PTYPE_INNER_L4_TCP &&
2168 il4 != RTE_PTYPE_INNER_L4_UDP &&
2169 il4 != RTE_PTYPE_INNER_L4_FRAG &&
2170 il4 != RTE_PTYPE_INNER_L4_SCTP &&
2171 il4 != RTE_PTYPE_INNER_L4_ICMP &&
2172 il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2178 static int check_invalid_ptype_mapping(
2179 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2184 for (i = 0; i < count; i++) {
2185 uint16_t ptype = mapping_table[i].hw_ptype;
2186 uint32_t pkt_type = mapping_table[i].sw_ptype;
2188 if (ptype >= I40E_MAX_PKT_TYPE)
2191 if (pkt_type == RTE_PTYPE_UNKNOWN)
2194 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2197 if (check_invalid_pkt_type(pkt_type))
2205 rte_pmd_i40e_ptype_mapping_update(
2207 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2211 struct rte_eth_dev *dev;
2212 struct i40e_adapter *ad;
2215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2217 dev = &rte_eth_devices[port];
2219 if (!is_i40e_supported(dev))
2222 if (count > I40E_MAX_PKT_TYPE)
2225 if (check_invalid_ptype_mapping(mapping_items, count))
2228 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2231 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2232 ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2235 for (i = 0; i < count; i++)
2236 ad->ptype_tbl[mapping_items[i].hw_ptype]
2237 = mapping_items[i].sw_ptype;
2242 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2244 struct rte_eth_dev *dev;
2246 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2248 dev = &rte_eth_devices[port];
2250 if (!is_i40e_supported(dev))
2253 i40e_set_default_ptype_table(dev);
2258 int rte_pmd_i40e_ptype_mapping_get(
2260 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2265 struct rte_eth_dev *dev;
2266 struct i40e_adapter *ad;
2270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2272 dev = &rte_eth_devices[port];
2274 if (!is_i40e_supported(dev))
2277 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2279 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2282 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2284 mapping_items[n].hw_ptype = i;
2285 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2293 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2298 struct rte_eth_dev *dev;
2299 struct i40e_adapter *ad;
2302 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2304 dev = &rte_eth_devices[port];
2306 if (!is_i40e_supported(dev))
2309 if (!mask && check_invalid_pkt_type(target))
2312 if (check_invalid_pkt_type(pkt_type))
2315 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2317 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2319 if ((target | ad->ptype_tbl[i]) == target &&
2320 (target & ad->ptype_tbl[i]))
2321 ad->ptype_tbl[i] = pkt_type;
2323 if (ad->ptype_tbl[i] == target)
2324 ad->ptype_tbl[i] = pkt_type;
2332 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2333 struct ether_addr *mac_addr)
2335 struct rte_eth_dev *dev;
2336 struct i40e_pf_vf *vf;
2337 struct i40e_vsi *vsi;
2339 struct i40e_mac_filter_info mac_filter;
2342 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2345 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2347 dev = &rte_eth_devices[port];
2349 if (!is_i40e_supported(dev))
2352 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2354 if (vf_id >= pf->vf_num || !pf->vfs)
2357 vf = &pf->vfs[vf_id];
2360 PMD_DRV_LOG(ERR, "Invalid VSI.");
2364 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2365 ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2366 ret = i40e_vsi_add_mac(vsi, &mac_filter);
2367 if (ret != I40E_SUCCESS) {
2368 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2375 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2377 struct rte_eth_dev *dev;
2379 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2381 dev = &rte_eth_devices[port];
2383 if (!is_i40e_supported(dev))
2386 i40e_set_default_pctype_table(dev);
2391 int rte_pmd_i40e_flow_type_mapping_get(
2393 struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2395 struct rte_eth_dev *dev;
2396 struct i40e_adapter *ad;
2399 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2401 dev = &rte_eth_devices[port];
2403 if (!is_i40e_supported(dev))
2406 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2408 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2409 mapping_items[i].flow_type = i;
2410 mapping_items[i].pctype = ad->pctypes_tbl[i];
2417 rte_pmd_i40e_flow_type_mapping_update(
2419 struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2423 struct rte_eth_dev *dev;
2424 struct i40e_adapter *ad;
2427 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2429 dev = &rte_eth_devices[port];
2431 if (!is_i40e_supported(dev))
2434 if (count > I40E_FLOW_TYPE_MAX)
2437 for (i = 0; i < count; i++)
2438 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2439 mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2440 (mapping_items[i].pctype &
2441 (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2444 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2447 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2448 ad->pctypes_tbl[i] = 0ULL;
2449 ad->flow_types_mask = 0ULL;
2452 for (i = 0; i < count; i++) {
2453 ad->pctypes_tbl[mapping_items[i].flow_type] =
2454 mapping_items[i].pctype;
2455 if (mapping_items[i].pctype)
2456 ad->flow_types_mask |=
2457 (1ULL << mapping_items[i].flow_type);
2459 ad->flow_types_mask &=
2460 ~(1ULL << mapping_items[i].flow_type);
2463 for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2464 ad->pctypes_mask |= ad->pctypes_tbl[i];
2470 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2472 struct rte_eth_dev *dev;
2473 struct ether_addr *mac;
2476 struct i40e_pf_vf *vf;
2479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2480 dev = &rte_eth_devices[port];
2482 if (!is_i40e_supported(dev))
2485 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2486 vf_num = pf->vf_num;
2488 for (vf_id = 0; vf_id < vf_num; vf_id++) {
2489 vf = &pf->vfs[vf_id];
2490 mac = &vf->mac_addr;
2492 if (is_same_ether_addr(mac, vf_mac))
2500 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2504 struct i40e_vsi *vsi = pf->main_vsi;
2505 uint16_t queue_offset, bsf, tc_index;
2506 struct i40e_vsi_context ctxt;
2507 struct i40e_aqc_vsi_properties_data *vsi_info;
2508 struct i40e_queue_regions *region_info =
2510 int32_t ret = -EINVAL;
2512 if (!region_info->queue_region_number) {
2513 PMD_INIT_LOG(ERR, "there is no that region id been set before");
2517 memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2519 /* Update Queue Pairs Mapping for currently enabled UPs */
2520 ctxt.seid = vsi->seid;
2521 ctxt.pf_num = hw->pf_id;
2523 ctxt.uplink_seid = vsi->uplink_seid;
2524 ctxt.info = vsi->info;
2525 vsi_info = &ctxt.info;
2527 memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2528 memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2530 /* Configure queue region and queue mapping parameters,
2531 * for enabled queue region, allocate queues to this region.
2534 for (i = 0; i < region_info->queue_region_number; i++) {
2535 tc_index = region_info->region[i].region_id;
2536 bsf = rte_bsf32(region_info->region[i].queue_num);
2537 queue_offset = region_info->region[i].queue_start_index;
2538 vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2539 (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2540 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2543 /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2544 vsi_info->mapping_flags |=
2545 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2546 vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2547 vsi_info->valid_sections |=
2548 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2550 /* Update the VSI after updating the VSI queue-mapping information */
2551 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2553 PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
2554 hw->aq.asq_last_status);
2557 /* update the local VSI info with updated queue map */
2558 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2559 sizeof(vsi->info.tc_mapping));
2560 rte_memcpy(&vsi->info.queue_mapping,
2561 &ctxt.info.queue_mapping,
2562 sizeof(vsi->info.queue_mapping));
2563 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2564 vsi->info.valid_sections = 0;
2571 i40e_queue_region_set_region(struct i40e_pf *pf,
2572 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2575 struct i40e_vsi *main_vsi = pf->main_vsi;
2576 struct i40e_queue_regions *info = &pf->queue_region;
2577 int32_t ret = -EINVAL;
2579 if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2580 conf_ptr->queue_num <= 64)) {
2581 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2582 "total number of queues do not exceed the VSI allocation");
2586 if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2587 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2591 if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2592 > main_vsi->nb_used_qps) {
2593 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2597 for (i = 0; i < info->queue_region_number; i++)
2598 if (conf_ptr->region_id == info->region[i].region_id)
2601 if (i == info->queue_region_number &&
2602 i <= I40E_REGION_MAX_INDEX) {
2603 info->region[i].region_id = conf_ptr->region_id;
2604 info->region[i].queue_num = conf_ptr->queue_num;
2605 info->region[i].queue_start_index =
2606 conf_ptr->queue_start_index;
2607 info->queue_region_number++;
2609 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2617 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2618 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2620 int32_t ret = -EINVAL;
2621 struct i40e_queue_regions *info = &pf->queue_region;
2623 uint16_t region_index, flowtype_index;
2625 /* For the pctype or hardware flowtype of packet,
2626 * the specific index for each type has been defined
2627 * in file i40e_type.h as enum i40e_filter_pctype.
2630 if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2631 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2635 if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2636 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2641 for (i = 0; i < info->queue_region_number; i++)
2642 if (rss_region_conf->region_id == info->region[i].region_id)
2645 if (i == info->queue_region_number) {
2646 PMD_DRV_LOG(ERR, "that region id has not been set before");
2652 for (i = 0; i < info->queue_region_number; i++) {
2653 for (j = 0; j < info->region[i].flowtype_num; j++) {
2654 if (rss_region_conf->hw_flowtype ==
2655 info->region[i].hw_flowtype[j]) {
2656 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2662 flowtype_index = info->region[region_index].flowtype_num;
2663 info->region[region_index].hw_flowtype[flowtype_index] =
2664 rss_region_conf->hw_flowtype;
2665 info->region[region_index].flowtype_num++;
2671 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2674 uint8_t hw_flowtype;
2675 uint32_t pfqf_hregion;
2676 uint16_t i, j, index;
2677 struct i40e_queue_regions *info = &pf->queue_region;
2679 /* For the pctype or hardware flowtype of packet,
2680 * the specific index for each type has been defined
2681 * in file i40e_type.h as enum i40e_filter_pctype.
2684 for (i = 0; i < info->queue_region_number; i++) {
2685 for (j = 0; j < info->region[i].flowtype_num; j++) {
2686 hw_flowtype = info->region[i].hw_flowtype[j];
2687 index = hw_flowtype >> 3;
2689 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2691 if ((hw_flowtype & 0x7) == 0) {
2692 pfqf_hregion |= info->region[i].region_id <<
2693 I40E_PFQF_HREGION_REGION_0_SHIFT;
2694 pfqf_hregion |= 1 <<
2695 I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2696 } else if ((hw_flowtype & 0x7) == 1) {
2697 pfqf_hregion |= info->region[i].region_id <<
2698 I40E_PFQF_HREGION_REGION_1_SHIFT;
2699 pfqf_hregion |= 1 <<
2700 I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2701 } else if ((hw_flowtype & 0x7) == 2) {
2702 pfqf_hregion |= info->region[i].region_id <<
2703 I40E_PFQF_HREGION_REGION_2_SHIFT;
2704 pfqf_hregion |= 1 <<
2705 I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2706 } else if ((hw_flowtype & 0x7) == 3) {
2707 pfqf_hregion |= info->region[i].region_id <<
2708 I40E_PFQF_HREGION_REGION_3_SHIFT;
2709 pfqf_hregion |= 1 <<
2710 I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2711 } else if ((hw_flowtype & 0x7) == 4) {
2712 pfqf_hregion |= info->region[i].region_id <<
2713 I40E_PFQF_HREGION_REGION_4_SHIFT;
2714 pfqf_hregion |= 1 <<
2715 I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2716 } else if ((hw_flowtype & 0x7) == 5) {
2717 pfqf_hregion |= info->region[i].region_id <<
2718 I40E_PFQF_HREGION_REGION_5_SHIFT;
2719 pfqf_hregion |= 1 <<
2720 I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2721 } else if ((hw_flowtype & 0x7) == 6) {
2722 pfqf_hregion |= info->region[i].region_id <<
2723 I40E_PFQF_HREGION_REGION_6_SHIFT;
2724 pfqf_hregion |= 1 <<
2725 I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2727 pfqf_hregion |= info->region[i].region_id <<
2728 I40E_PFQF_HREGION_REGION_7_SHIFT;
2729 pfqf_hregion |= 1 <<
2730 I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2733 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2740 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2741 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2743 struct i40e_queue_regions *info = &pf->queue_region;
2744 int32_t ret = -EINVAL;
2745 uint16_t i, j, region_index;
2747 if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2748 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2752 if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2753 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2757 for (i = 0; i < info->queue_region_number; i++)
2758 if (rss_region_conf->region_id == info->region[i].region_id)
2761 if (i == info->queue_region_number) {
2762 PMD_DRV_LOG(ERR, "that region id has not been set before");
2769 for (i = 0; i < info->queue_region_number; i++) {
2770 for (j = 0; j < info->region[i].user_priority_num; j++) {
2771 if (info->region[i].user_priority[j] ==
2772 rss_region_conf->user_priority) {
2773 PMD_DRV_LOG(ERR, "that user priority has been set before");
2779 j = info->region[region_index].user_priority_num;
2780 info->region[region_index].user_priority[j] =
2781 rss_region_conf->user_priority;
2782 info->region[region_index].user_priority_num++;
2788 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2791 struct i40e_dcbx_config dcb_cfg_local;
2792 struct i40e_dcbx_config *dcb_cfg;
2793 struct i40e_queue_regions *info = &pf->queue_region;
2794 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2795 int32_t ret = -EINVAL;
2796 uint16_t i, j, prio_index, region_index;
2797 uint8_t tc_map, tc_bw, bw_lf;
2799 if (!info->queue_region_number) {
2800 PMD_DRV_LOG(ERR, "No queue region been set before");
2804 dcb_cfg = &dcb_cfg_local;
2805 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2807 /* assume each tc has the same bw */
2808 tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2809 for (i = 0; i < info->queue_region_number; i++)
2810 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2811 /* to ensure the sum of tcbw is equal to 100 */
2812 bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
2813 for (i = 0; i < bw_lf; i++)
2814 dcb_cfg->etscfg.tcbwtable[i]++;
2816 /* assume each tc has the same Transmission Selection Algorithm */
2817 for (i = 0; i < info->queue_region_number; i++)
2818 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
2820 for (i = 0; i < info->queue_region_number; i++) {
2821 for (j = 0; j < info->region[i].user_priority_num; j++) {
2822 prio_index = info->region[i].user_priority[j];
2823 region_index = info->region[i].region_id;
2824 dcb_cfg->etscfg.prioritytable[prio_index] =
2829 /* FW needs one App to configure HW */
2830 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2831 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2832 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2833 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
2835 tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2837 dcb_cfg->pfc.willing = 0;
2838 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2839 dcb_cfg->pfc.pfcenable = tc_map;
2841 /* Copy the new config to the current config */
2842 *old_cfg = *dcb_cfg;
2843 old_cfg->etsrec = old_cfg->etscfg;
2844 ret = i40e_set_dcb_config(hw);
2847 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2848 i40e_stat_str(hw, ret),
2849 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Commit ("flush on") or tear down ("flush off") all stored queue-region
 * configuration to hardware for the PF.
 *
 * On the enable path it programs the PF flow-type mapping, updates the
 * VSI queue-region mapping, and applies the DCB configuration derived from
 * the cached region table.  On the disable path it collapses the region
 * table back to a single region covering all of the main VSI's used queues,
 * re-applies the default mapping/DCB, and re-initializes the cached
 * queue-region state.
 *
 * NOTE(review): the branch selecting between the "on" and "off" paths
 * (presumably `if (on) { ... }` around original lines 2863-2880) is not
 * visible in this chunk; the grouping of the calls below is inferred from
 * the surviving indenture of the code — confirm against the full file.
 *
 * Returns I40E_SUCCESS on success, a negative/AQ error code otherwise.
 */
2857 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2858 struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2860 int32_t ret = -EINVAL;
2861 struct i40e_queue_regions *info = &pf->queue_region;
2862 struct i40e_vsi *main_vsi = pf->main_vsi;
/* --- enable ("flush on") path: push cached config to HW --- */
2865 i40e_queue_region_pf_flowtype_conf(hw, pf);
2867 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2868 if (ret != I40E_SUCCESS) {
2869 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
/* DCB (ETS/TC) setup must follow the queue mapping update. */
2873 ret = i40e_queue_region_dcb_configure(hw, pf);
2874 if (ret != I40E_SUCCESS) {
2875 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
/* --- disable ("flush off") path: restore the single default region --- */
2882 if (info->queue_region_number) {
/* Collapse to one region spanning every queue the main VSI uses. */
2883 info->queue_region_number = 1;
2884 info->region[0].queue_num = main_vsi->nb_used_qps;
2885 info->region[0].queue_start_index = 0;
2887 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2888 if (ret != I40E_SUCCESS)
2889 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
/* Re-apply the driver's default DCB config; drop the DCB flag on failure. */
2891 ret = i40e_dcb_init_configure(dev, TRUE);
2892 if (ret != I40E_SUCCESS) {
2893 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2894 pf->flags &= ~I40E_FLAG_DCB;
/* Reset the software-cached queue-region state to start-up defaults. */
2897 i40e_init_queue_region_conf(dev);
/*
 * Check that RSS is enabled on the PF by reading the hash-enable (HENA)
 * registers; the queue-region feature requires RSS to be active.
 *
 * Reads both 32-bit halves of the hash-enable field and combines them
 * into a 64-bit mask.
 *
 * NOTE(review): the return logic (original lines 2910-2916) is not visible
 * in this chunk — presumably it returns an error when `hena` is zero and
 * success otherwise; confirm against the full file.
 */
2903 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2905 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
/* HENA(0) holds the low 32 bits, HENA(1) the high 32 bits of the mask. */
2908 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2909 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2918 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2919 struct i40e_queue_regions *regions_ptr)
2921 struct i40e_queue_regions *info = &pf->queue_region;
2923 rte_memcpy(regions_ptr, info,
2924 sizeof(struct i40e_queue_regions));
/*
 * Public entry point dispatching all queue-region operations (set region,
 * set flow type, set user priority, flush on/off, get info) to the
 * corresponding internal handler, then flushing register writes.
 *
 * NOTE(review): `dev`, `pf` and `hw` are derived from
 * rte_eth_devices[port_id] BEFORE RTE_ETH_VALID_PORTID_OR_ERR_RET
 * validates port_id — an invalid port therefore indexes the device array
 * (and dereferences dev->data) before the check can reject it.  Consider
 * moving the validation above the lookups; confirm intended order against
 * upstream DPDK.
 */
2929 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2930 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2932 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2933 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2934 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2937 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2939 if (!is_i40e_supported(dev))
/* Queue regions are only usable when RSS is enabled on the PF. */
2942 if (!(!i40e_queue_region_pf_check_rss(pf)))
2945 /* This queue region feature only support pf by now. It should
2946 * be called after dev_start, and will be clear after dev_stop.
2947 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2948 * is just an enable function which server for other configuration,
2949 * it is for all configuration about queue region from up layer,
2950 * at first will only keep in DPDK softwarestored in driver,
2951 * only after "FLUSH_ON", it commit all configuration to HW.
2952 * Because PMD had to set hardware configuration at a time, so
2953 * it will record all up layer command at first.
2954 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2955 * just clean all configuration about queue region just now,
2956 * and restore all to DPDK i40e driver default
2957 * config when start up.
/* Dispatch on op_type; SET ops only update the software cache. */
2961 case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2962 ret = i40e_queue_region_set_region(pf,
2963 (struct rte_pmd_i40e_queue_region_conf *)arg);
2965 case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2966 ret = i40e_queue_region_set_flowtype(pf,
2967 (struct rte_pmd_i40e_queue_region_conf *)arg);
2969 case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2970 ret = i40e_queue_region_set_user_priority(pf,
2971 (struct rte_pmd_i40e_queue_region_conf *)arg);
/* FLUSH ops commit to (on) or restore defaults in (off) the hardware. */
2973 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2974 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2976 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2977 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2979 case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2980 ret = i40e_queue_region_get_all_info(pf,
2981 (struct i40e_queue_regions *)arg);
2984 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
/* Ensure any queued register writes reach the device before returning. */
2989 I40E_WRITE_FLUSH(hw);
2994 int rte_pmd_i40e_flow_add_del_packet_template(
2996 const struct rte_pmd_i40e_pkt_template_conf *conf,
2999 struct rte_eth_dev *dev = &rte_eth_devices[port];
3000 struct i40e_fdir_filter_conf filter_conf;
3002 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3004 if (!is_i40e_supported(dev))
3007 memset(&filter_conf, 0, sizeof(filter_conf));
3008 filter_conf.soft_id = conf->soft_id;
3009 filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3010 filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3011 filter_conf.input.flow.raw_flow.length = conf->input.length;
3012 filter_conf.input.flow_ext.pkt_template = true;
3014 filter_conf.action.rx_queue = conf->action.rx_queue;
3015 filter_conf.action.behavior =
3016 (enum i40e_fdir_behavior)conf->action.behavior;
3017 filter_conf.action.report_status =
3018 (enum i40e_fdir_status)conf->action.report_status;
3019 filter_conf.action.flex_off = conf->action.flex_off;
3021 return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);