New upstream version 17.11.4
[deb_dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
36
37 #include "base/i40e_prototype.h"
38 #include "base/i40e_dcb.h"
39 #include "i40e_ethdev.h"
40 #include "i40e_pf.h"
41 #include "i40e_rxtx.h"
42 #include "rte_pmd_i40e.h"
43
44 int
45 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
46 {
47         struct rte_eth_dev *dev;
48         struct i40e_pf *pf;
49
50         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
51
52         dev = &rte_eth_devices[port];
53
54         if (!is_i40e_supported(dev))
55                 return -ENOTSUP;
56
57         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
58
59         if (vf >= pf->vf_num || !pf->vfs) {
60                 PMD_DRV_LOG(ERR, "Invalid argument.");
61                 return -EINVAL;
62         }
63
64         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
65
66         return 0;
67 }
68
/*
 * Enable or disable the MAC anti-spoof check on a VF's VSI.
 *
 * Reads the cached VSI context to short-circuit when the requested state
 * is already programmed, then updates the security section via the
 * admin queue.
 *
 * @param port   ethdev port id of the i40e PF.
 * @param vf_id  VF index under this PF.
 * @param on     non-zero to enable the check, zero to disable it.
 * @return 0 on success (including "already in requested state"),
 *         -ENODEV/-ENOTSUP/-EINVAL on validation failure, -ENOTSUP if
 *         the admin-queue update fails.
 */
int
rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Check if it has been already on or off; only trust the cached
	 * sec_flags when the security section of the context is valid.
	 */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
		if (on) {
			if ((vsi->info.sec_flags &
			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
				return 0; /* already on */
		} else {
			if ((vsi->info.sec_flags &
			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
				return 0; /* already off */
		}
	}

	/* Mark only the security section as valid for the update. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (on)
		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
	else
		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
133
134 static int
135 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
136 {
137         uint32_t j, k;
138         uint16_t vlan_id;
139         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
140         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
141         int ret;
142
143         for (j = 0; j < I40E_VFTA_SIZE; j++) {
144                 if (!vsi->vfta[j])
145                         continue;
146
147                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
148                         if (!(vsi->vfta[j] & (1 << k)))
149                                 continue;
150
151                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
152                         if (!vlan_id)
153                                 continue;
154
155                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
156                         if (add)
157                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
158                                                        &vlan_data, 1, NULL);
159                         else
160                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
161                                                           &vlan_data, 1, NULL);
162                         if (ret != I40E_SUCCESS) {
163                                 PMD_DRV_LOG(ERR,
164                                             "Failed to add/rm vlan filter");
165                                 return ret;
166                         }
167                 }
168         }
169
170         return I40E_SUCCESS;
171 }
172
/*
 * Enable or disable the VLAN anti-spoof check on a VF's VSI.
 *
 * When enabling while generic VLAN filtering is off, the VFTA-recorded
 * VLAN filters must first be programmed into hardware, otherwise all
 * traffic would be dropped by the check.
 *
 * @param port   ethdev port id of the i40e PF.
 * @param vf_id  VF index under this PF.
 * @param on     non-zero to enable the check, zero to disable it.
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on validation failure,
 *         -ENOTSUP if a hardware update fails.
 */
int
rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Check if it has been already on or off */
	if (vsi->vlan_anti_spoof_on == on)
		return 0; /* already on or off */

	vsi->vlan_anti_spoof_on = on;
	/* If VLAN filtering is already on, hardware filters exist; only
	 * sync them from the VFTA bitmap when filtering is off.
	 */
	if (!vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, on);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
			return -ENOTSUP;
		}
	}

	/* Update the security section of the VSI context. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (on)
		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
	else
		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
235
/*
 * Remove every MAC/VLAN hardware filter recorded in the VSI's mac_list.
 *
 * The software mac_list itself is left intact so the filters can later
 * be re-programmed (see i40e_vsi_restore_mac_filter()).
 *
 * @param vsi  VSI whose filters are removed from hardware.
 * @return I40E_SUCCESS, I40E_ERR_PARAM for an inconsistent VLAN count,
 *         I40E_ERR_NO_MEMORY on allocation failure, or the first
 *         admin-queue error encountered.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		/* A MACVLAN filter needs one entry per VLAN; a plain MAC
		 * filter needs exactly one entry.
		 */
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			   filter_type == RTE_MAC_HASH_MATCH)
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Seed every entry with this filter's MAC address. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		/* For MACVLAN filters, fill in the VLAN id of each entry. */
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
294
/*
 * Re-program into hardware every MAC/VLAN filter recorded in the VSI's
 * mac_list — the counterpart of i40e_vsi_rm_mac_filter().
 *
 * @param vsi  VSI whose software filter list is restored to hardware.
 * @return I40E_SUCCESS, I40E_ERR_NO_MEMORY on allocation failure, or
 *         the first admin-queue error encountered.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Seed every entry with this filter's MAC address. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		/* For MACVLAN filters, fill in the VLAN id of each entry. */
		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
356
/*
 * Enable or disable TX loopback (VEB local bridging) on one VSI.
 *
 * The switch_id ALLOW_LB flag can only be changed while the VSI has no
 * MAC/VLAN filters, so the sequence is: remove all filters, update the
 * VSI switch section via the admin queue, then restore the filters.
 * Requires firmware >= v5.0.
 *
 * @param vsi  VSI to update; may be NULL (rejected with -EINVAL).
 * @param on   non-zero to allow loopback, zero to disallow it.
 * @return 0 on success (including "already in requested state"),
 *         -EINVAL/-ENOTSUP on validation failure, or an i40e status
 *         code from the filter/AQ operations.
 */
static int
i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
{
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw;
	int ret;

	if (!vsi)
		return -EINVAL;

	hw = I40E_VSI_TO_HW(vsi);

	/* Use the FW API if FW >= v5.0 */
	if (hw->aq.fw_maj_ver < 5) {
		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
		return -ENOTSUP;
	}

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
		if (on) {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
				return 0; /* already on */
		} else {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
				return 0; /* already off */
		}
	}

	/* remove all the MAC and VLAN first */
	ret = i40e_vsi_rm_mac_filter(vsi);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
		return ret;
	}
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
			return ret;
		}
	}

	/* Update the switch section of the VSI context. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (on)
		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
	else
		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
		return ret;
	}

	/* add all the MAC and VLAN back */
	ret = i40e_vsi_restore_mac_filter(vsi);
	if (ret)
		return ret;
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
		if (ret)
			return ret;
	}

	return ret;
}
432
433 int
434 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
435 {
436         struct rte_eth_dev *dev;
437         struct i40e_pf *pf;
438         struct i40e_pf_vf *vf;
439         struct i40e_vsi *vsi;
440         uint16_t vf_id;
441         int ret;
442
443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
444
445         dev = &rte_eth_devices[port];
446
447         if (!is_i40e_supported(dev))
448                 return -ENOTSUP;
449
450         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
451
452         /* setup PF TX loopback */
453         vsi = pf->main_vsi;
454         ret = i40e_vsi_set_tx_loopback(vsi, on);
455         if (ret)
456                 return -ENOTSUP;
457
458         /* setup TX loopback for all the VFs */
459         if (!pf->vfs) {
460                 /* if no VF, do nothing. */
461                 return 0;
462         }
463
464         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
465                 vf = &pf->vfs[vf_id];
466                 vsi = vf->vsi;
467
468                 ret = i40e_vsi_set_tx_loopback(vsi, on);
469                 if (ret)
470                         return -ENOTSUP;
471         }
472
473         return ret;
474 }
475
476 int
477 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
478 {
479         struct rte_eth_dev *dev;
480         struct i40e_pf *pf;
481         struct i40e_vsi *vsi;
482         struct i40e_hw *hw;
483         int ret;
484
485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
486
487         dev = &rte_eth_devices[port];
488
489         if (!is_i40e_supported(dev))
490                 return -ENOTSUP;
491
492         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
493
494         if (vf_id >= pf->vf_num || !pf->vfs) {
495                 PMD_DRV_LOG(ERR, "Invalid argument.");
496                 return -EINVAL;
497         }
498
499         vsi = pf->vfs[vf_id].vsi;
500         if (!vsi) {
501                 PMD_DRV_LOG(ERR, "Invalid VSI.");
502                 return -EINVAL;
503         }
504
505         hw = I40E_VSI_TO_HW(vsi);
506
507         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
508                                                   on, NULL, true);
509         if (ret != I40E_SUCCESS) {
510                 ret = -ENOTSUP;
511                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
512         }
513
514         return ret;
515 }
516
517 int
518 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
519 {
520         struct rte_eth_dev *dev;
521         struct i40e_pf *pf;
522         struct i40e_vsi *vsi;
523         struct i40e_hw *hw;
524         int ret;
525
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
527
528         dev = &rte_eth_devices[port];
529
530         if (!is_i40e_supported(dev))
531                 return -ENOTSUP;
532
533         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
534
535         if (vf_id >= pf->vf_num || !pf->vfs) {
536                 PMD_DRV_LOG(ERR, "Invalid argument.");
537                 return -EINVAL;
538         }
539
540         vsi = pf->vfs[vf_id].vsi;
541         if (!vsi) {
542                 PMD_DRV_LOG(ERR, "Invalid VSI.");
543                 return -EINVAL;
544         }
545
546         hw = I40E_VSI_TO_HW(vsi);
547
548         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
549                                                     on, NULL);
550         if (ret != I40E_SUCCESS) {
551                 ret = -ENOTSUP;
552                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
553         }
554
555         return ret;
556 }
557
/*
 * Set the administratively-assigned MAC address of a VF.
 *
 * Records the new address in the PF's per-VF state and removes every
 * MAC filter currently on the VF's VSI.
 * NOTE(review): no filter for the new address is added here — the VF
 * presumably picks it up on its next reset/configure; confirm against
 * the PF/VF message handling before relying on this.
 *
 * @param port      ethdev port id of the i40e PF.
 * @param vf_id     VF index under this PF.
 * @param mac_addr  new unicast MAC address (validated first).
 * @return 0 on success, -EINVAL/-ENODEV/-ENOTSUP on failure.
 */
int
rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
			     struct ether_addr *mac_addr)
{
	struct i40e_mac_filter *f;
	struct rte_eth_dev *dev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	void *temp;

	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs)
		return -EINVAL;

	vf = &pf->vfs[vf_id];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	ether_addr_copy(mac_addr, &vf->mac_addr);

	/* Remove all existing mac */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
				!= I40E_SUCCESS)
			PMD_DRV_LOG(WARNING, "Delete MAC failed");

	return 0;
}
601
602 /* Set vlan strip on/off for specific VF from host */
603 int
604 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
605 {
606         struct rte_eth_dev *dev;
607         struct i40e_pf *pf;
608         struct i40e_vsi *vsi;
609         int ret;
610
611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
612
613         dev = &rte_eth_devices[port];
614
615         if (!is_i40e_supported(dev))
616                 return -ENOTSUP;
617
618         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
619
620         if (vf_id >= pf->vf_num || !pf->vfs) {
621                 PMD_DRV_LOG(ERR, "Invalid argument.");
622                 return -EINVAL;
623         }
624
625         vsi = pf->vfs[vf_id].vsi;
626
627         if (!vsi)
628                 return -EINVAL;
629
630         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
631         if (ret != I40E_SUCCESS) {
632                 ret = -ENOTSUP;
633                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
634         }
635
636         return ret;
637 }
638
/*
 * Configure port-VLAN (PVID) insertion on a VF's VSI.
 *
 * A non-zero vlan_id enables insertion of that VLAN on transmit; zero
 * disables insertion.
 *
 * @param port     ethdev port id of the i40e PF.
 * @param vf_id    VF index under this PF.
 * @param vlan_id  VLAN to insert (0..ETHER_MAX_VLAN_ID; 0 disables).
 * @return 0 on success, -ENODEV/-EINVAL/-ENOTSUP on failure.
 */
int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
				    uint16_t vlan_id)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi *vsi;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (vlan_id > ETHER_MAX_VLAN_ID) {
		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return -ENODEV;

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Update only the VLAN section of the VSI context. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = vlan_id;
	if (vlan_id > 0)
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
	else
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	/* NOTE(review): hw was already set from the PF above; this
	 * re-derivation from the VF VSI appears redundant — confirm.
	 */
	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
703
/*
 * Allow or block broadcast traffic on a VF's VSI by adding/removing a
 * perfect-match filter for ff:ff:ff:ff:ff:ff.
 *
 * @param port   ethdev port id of the i40e PF.
 * @param vf_id  VF index under this PF.
 * @param on     1 to add the broadcast filter, 0 to remove it.
 * @return 0 on success, -ENODEV/-EINVAL/-ENOTSUP on failure.
 */
int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
				  uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_mac_filter_info filter;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (on) {
		rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		ret = i40e_vsi_add_mac(vsi, &filter);
	} else {
		ret = i40e_vsi_delete_mac(vsi, &broadcast);
	}

	/* I40E_ERR_PARAM means the filter was already present/absent;
	 * treat that as success.
	 */
	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
	} else {
		ret = 0;
	}

	return ret;
}
769
/*
 * Select whether a VF's VSI transmits frames tagged or untagged
 * (PVLAN mode TAGGED vs. UNTAGGED).
 *
 * @param port   ethdev port id of the i40e PF.
 * @param vf_id  VF index under this PF.
 * @param on     1 for tagged mode, 0 for untagged mode.
 * @return 0 on success, -ENODEV/-EINVAL/-ENOTSUP on failure.
 */
int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi *vsi;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* TAGGED and UNTAGGED are mutually exclusive mode bits. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	if (on) {
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
	} else {
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
837
838 static int
839 i40e_vlan_filter_count(struct i40e_vsi *vsi)
840 {
841         uint32_t j, k;
842         uint16_t vlan_id;
843         int count = 0;
844
845         for (j = 0; j < I40E_VFTA_SIZE; j++) {
846                 if (!vsi->vfta[j])
847                         continue;
848
849                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
850                         if (!(vsi->vfta[j] & (1 << k)))
851                                 continue;
852
853                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
854                         if (!vlan_id)
855                                 continue;
856
857                         count++;
858                 }
859         }
860
861         return count;
862 }
863
864 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
865                                     uint64_t vf_mask, uint8_t on)
866 {
867         struct rte_eth_dev *dev;
868         struct i40e_pf *pf;
869         struct i40e_hw *hw;
870         struct i40e_vsi *vsi;
871         uint16_t vf_idx;
872         int ret = I40E_SUCCESS;
873
874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
875
876         dev = &rte_eth_devices[port];
877
878         if (!is_i40e_supported(dev))
879                 return -ENOTSUP;
880
881         if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
882                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
883                 return -EINVAL;
884         }
885
886         if (vf_mask == 0) {
887                 PMD_DRV_LOG(ERR, "No VF.");
888                 return -EINVAL;
889         }
890
891         if (on > 1) {
892                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
893                 return -EINVAL;
894         }
895
896         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
897         hw = I40E_PF_TO_HW(pf);
898
899         /**
900          * return -ENODEV if SRIOV not enabled, VF number not configured
901          * or no queue assigned.
902          */
903         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
904             pf->vf_nb_qps == 0) {
905                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
906                 return -ENODEV;
907         }
908
909         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
910                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
911                         vsi = pf->vfs[vf_idx].vsi;
912                         if (on) {
913                                 if (!vsi->vlan_filter_on) {
914                                         vsi->vlan_filter_on = true;
915                                         i40e_aq_set_vsi_vlan_promisc(hw,
916                                                                      vsi->seid,
917                                                                      false,
918                                                                      NULL);
919                                         if (!vsi->vlan_anti_spoof_on)
920                                                 i40e_add_rm_all_vlan_filter(
921                                                         vsi, true);
922                                 }
923                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
924                         } else {
925                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
926
927                                 if (!i40e_vlan_filter_count(vsi)) {
928                                         vsi->vlan_filter_on = false;
929                                         i40e_aq_set_vsi_vlan_promisc(hw,
930                                                                      vsi->seid,
931                                                                      true,
932                                                                      NULL);
933                                 }
934                         }
935                 }
936         }
937
938         if (ret != I40E_SUCCESS) {
939                 ret = -ENOTSUP;
940                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
941         }
942
943         return ret;
944 }
945
946 int
947 rte_pmd_i40e_get_vf_stats(uint16_t port,
948                           uint16_t vf_id,
949                           struct rte_eth_stats *stats)
950 {
951         struct rte_eth_dev *dev;
952         struct i40e_pf *pf;
953         struct i40e_vsi *vsi;
954
955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
956
957         dev = &rte_eth_devices[port];
958
959         if (!is_i40e_supported(dev))
960                 return -ENOTSUP;
961
962         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
963
964         if (vf_id >= pf->vf_num || !pf->vfs) {
965                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
966                 return -EINVAL;
967         }
968
969         vsi = pf->vfs[vf_id].vsi;
970         if (!vsi) {
971                 PMD_DRV_LOG(ERR, "Invalid VSI.");
972                 return -EINVAL;
973         }
974
975         i40e_update_vsi_stats(vsi);
976
977         stats->ipackets = vsi->eth_stats.rx_unicast +
978                         vsi->eth_stats.rx_multicast +
979                         vsi->eth_stats.rx_broadcast;
980         stats->opackets = vsi->eth_stats.tx_unicast +
981                         vsi->eth_stats.tx_multicast +
982                         vsi->eth_stats.tx_broadcast;
983         stats->ibytes   = vsi->eth_stats.rx_bytes;
984         stats->obytes   = vsi->eth_stats.tx_bytes;
985         stats->ierrors  = vsi->eth_stats.rx_discards;
986         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
987
988         return 0;
989 }
990
991 int
992 rte_pmd_i40e_reset_vf_stats(uint16_t port,
993                             uint16_t vf_id)
994 {
995         struct rte_eth_dev *dev;
996         struct i40e_pf *pf;
997         struct i40e_vsi *vsi;
998
999         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1000
1001         dev = &rte_eth_devices[port];
1002
1003         if (!is_i40e_supported(dev))
1004                 return -ENOTSUP;
1005
1006         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1007
1008         if (vf_id >= pf->vf_num || !pf->vfs) {
1009                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1010                 return -EINVAL;
1011         }
1012
1013         vsi = pf->vfs[vf_id].vsi;
1014         if (!vsi) {
1015                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1016                 return -EINVAL;
1017         }
1018
1019         vsi->offset_loaded = false;
1020         i40e_update_vsi_stats(vsi);
1021
1022         return 0;
1023 }
1024
1025 int
1026 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1027 {
1028         struct rte_eth_dev *dev;
1029         struct i40e_pf *pf;
1030         struct i40e_vsi *vsi;
1031         struct i40e_hw *hw;
1032         int ret = 0;
1033         int i;
1034
1035         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1036
1037         dev = &rte_eth_devices[port];
1038
1039         if (!is_i40e_supported(dev))
1040                 return -ENOTSUP;
1041
1042         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1043
1044         if (vf_id >= pf->vf_num || !pf->vfs) {
1045                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1046                 return -EINVAL;
1047         }
1048
1049         vsi = pf->vfs[vf_id].vsi;
1050         if (!vsi) {
1051                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1052                 return -EINVAL;
1053         }
1054
1055         if (bw > I40E_QOS_BW_MAX) {
1056                 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1057                             I40E_QOS_BW_MAX);
1058                 return -EINVAL;
1059         }
1060
1061         if (bw % I40E_QOS_BW_GRANULARITY) {
1062                 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1063                             I40E_QOS_BW_GRANULARITY);
1064                 return -EINVAL;
1065         }
1066
1067         bw /= I40E_QOS_BW_GRANULARITY;
1068
1069         hw = I40E_VSI_TO_HW(vsi);
1070
1071         /* No change. */
1072         if (bw == vsi->bw_info.bw_limit) {
1073                 PMD_DRV_LOG(INFO,
1074                             "No change for VF max bandwidth. Nothing to do.");
1075                 return 0;
1076         }
1077
1078         /**
1079          * VF bandwidth limitation and TC bandwidth limitation cannot be
1080          * enabled in parallel, quit if TC bandwidth limitation is enabled.
1081          *
1082          * If bw is 0, means disable bandwidth limitation. Then no need to
1083          * check TC bandwidth limitation.
1084          */
1085         if (bw) {
1086                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1087                         if ((vsi->enabled_tc & BIT_ULL(i)) &&
1088                             vsi->bw_info.bw_ets_credits[i])
1089                                 break;
1090                 }
1091                 if (i != I40E_MAX_TRAFFIC_CLASS) {
1092                         PMD_DRV_LOG(ERR,
1093                                     "TC max bandwidth has been set on this VF,"
1094                                     " please disable it first.");
1095                         return -EINVAL;
1096                 }
1097         }
1098
1099         ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1100         if (ret) {
1101                 PMD_DRV_LOG(ERR,
1102                             "Failed to set VF %d bandwidth, err(%d).",
1103                             vf_id, ret);
1104                 return -EINVAL;
1105         }
1106
1107         /* Store the configuration. */
1108         vsi->bw_info.bw_limit = (uint16_t)bw;
1109         vsi->bw_info.bw_max = 0;
1110
1111         return 0;
1112 }
1113
/**
 * Set the relative (ETS share) bandwidth weights of a VF's enabled TCs.
 *
 * bw_weight[] holds one weight per *enabled* TC, in ascending TC order;
 * tc_num must equal the number of enabled TCs and the weights must sum
 * to 100.
 *
 * Returns 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
				uint8_t tc_num, uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
	int ret = 0;
	int i, j;
	uint16_t sum;
	bool b_change = false;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	/* The caller must provide exactly one weight per enabled TC. */
	sum = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}
	if (sum != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    sum);
		return -EINVAL;
	}

	/* Every weight must be >= 1 and the weights must total 100 (%). */
	sum = 0;
	for (i = 0; i < tc_num; i++) {
		if (!bw_weight[i]) {
			PMD_DRV_LOG(ERR,
				    "The weight should be 1 at least.");
			return -EINVAL;
		}
		sum += bw_weight[i];
	}
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The summary of the TC weight should be 100.");
		return -EINVAL;
	}

	/**
	 * Create the configuration for all the TCs.
	 */
	/* j walks the caller's compact bw_weight[] array while i walks
	 * the full TC index space; only enabled TCs consume an entry.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			if (bw_weight[j] !=
				vsi->bw_info.bw_ets_share_credits[i])
				b_change = true;

			tc_bw.tc_bw_credits[i] = bw_weight[j];
			j++;
		}
	}

	/* No change. */
	if (!b_change) {
		PMD_DRV_LOG(INFO,
			    "No change for TC allocated bandwidth."
			    " Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC bandwidth weight, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	/* Same compact-to-sparse index mapping as above. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
			j++;
		}
	}

	return 0;
}
1227
/**
 * Set the max bandwidth (in Mbps) of one traffic class of a VF.
 *
 * A per-TC limit conflicts with a per-VF limit, so any active per-VF
 * limit is disabled first. bw == 0 disables the TC limit.
 *
 * Returns 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
			      uint8_t tc_no, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* HW programs the limit in units of I40E_QOS_BW_GRANULARITY Mbps. */
	bw /= I40E_QOS_BW_GRANULARITY;

	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	hw = I40E_VSI_TO_HW(vsi);

	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
			    vf_id, tc_no);
		return -EINVAL;
	}

	/* No change. */
	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
		PMD_DRV_LOG(INFO,
			    "No change for TC max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, disable VF bandwidth limitation if it's
	 * enabled.
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * care about VF bandwidth limitation configuration.
	 */
	if (bw && vsi->bw_info.bw_limit) {
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to disable VF(%d)"
				    " bandwidth limitation, err(%d).",
				    vf_id, ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "VF max bandwidth is disabled according"
			    " to TC max bandwidth setting.");
	}

	/**
	 * Get all the TCs' info to create a whole picture.
	 * Because the incremental change isn't permitted.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			tc_bw.tc_bw_credits[i] =
				rte_cpu_to_le_16(
					vsi->bw_info.bw_ets_credits[i]);
		}
	}
	/* Overwrite the one TC being changed; the rest keep cached values. */
	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
			    vf_id, tc_no, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

	return 0;
}
1345
/**
 * Put the TCs in @tc_map into strict-priority scheduling mode on the
 * main VSI's VEB.
 *
 * DCBx is stopped the first time strict priority is enabled and
 * restarted when tc_map goes back to 0 (all strict priority disabled),
 * because the two mechanisms would otherwise fight over ETS config.
 *
 * Returns 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	/* Every requested TC must already be enabled on the VEB. */
	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/* Disable DCBx if it's the first time to set strict priority. */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, NULL);
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For robustness, if bandwidth is 0, use 1 instead. */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/* Pick the AQ opcode by transition: enable on first use, modify
	 * while staying enabled, disable when tc_map drops to 0.
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1464
1465 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1466 #define I40E_MAX_PROFILE_NUM 16
1467
1468 static void
1469 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1470                                uint32_t track_id, uint8_t *profile_info_sec,
1471                                bool add)
1472 {
1473         struct i40e_profile_section_header *sec = NULL;
1474         struct i40e_profile_info *pinfo;
1475
1476         sec = (struct i40e_profile_section_header *)profile_info_sec;
1477         sec->tbl_size = 1;
1478         sec->data_end = sizeof(struct i40e_profile_section_header) +
1479                 sizeof(struct i40e_profile_info);
1480         sec->section.type = SECTION_TYPE_INFO;
1481         sec->section.offset = sizeof(struct i40e_profile_section_header);
1482         sec->section.size = sizeof(struct i40e_profile_info);
1483         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1484                                              sec->section.offset);
1485         pinfo->track_id = track_id;
1486         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1487         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1488         if (add)
1489                 pinfo->op = I40E_DDP_ADD_TRACKID;
1490         else
1491                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1492 }
1493
1494 static enum i40e_status_code
1495 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1496 {
1497         enum i40e_status_code status = I40E_SUCCESS;
1498         struct i40e_profile_section_header *sec;
1499         uint32_t track_id;
1500         uint32_t offset = 0;
1501         uint32_t info = 0;
1502
1503         sec = (struct i40e_profile_section_header *)profile_info_sec;
1504         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1505                                          sec->section.offset))->track_id;
1506
1507         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1508                                    track_id, &offset, &info, NULL);
1509         if (status)
1510                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1511                             "offset %d, info %d",
1512                             offset, info);
1513
1514         return status;
1515 }
1516
1517 /* Check if the profile info exists */
1518 static int
1519 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1520 {
1521         struct rte_eth_dev *dev = &rte_eth_devices[port];
1522         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1523         uint8_t *buff;
1524         struct rte_pmd_i40e_profile_list *p_list;
1525         struct rte_pmd_i40e_profile_info *pinfo, *p;
1526         uint32_t i;
1527         int ret;
1528         static const uint32_t group_mask = 0x00ff0000;
1529
1530         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1531                              sizeof(struct i40e_profile_section_header));
1532         if (pinfo->track_id == 0) {
1533                 PMD_DRV_LOG(INFO, "Read-only profile.");
1534                 return 0;
1535         }
1536         buff = rte_zmalloc("pinfo_list",
1537                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1538                            0);
1539         if (!buff) {
1540                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1541                 return -1;
1542         }
1543
1544         ret = i40e_aq_get_ddp_list(
1545                 hw, (void *)buff,
1546                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1547                 0, NULL);
1548         if (ret) {
1549                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1550                 rte_free(buff);
1551                 return -1;
1552         }
1553         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1554         for (i = 0; i < p_list->p_count; i++) {
1555                 p = &p_list->p_info[i];
1556                 if (pinfo->track_id == p->track_id) {
1557                         PMD_DRV_LOG(INFO, "Profile exists.");
1558                         rte_free(buff);
1559                         return 1;
1560                 }
1561         }
1562         for (i = 0; i < p_list->p_count; i++) {
1563                 p = &p_list->p_info[i];
1564                 if ((p->track_id & group_mask) == 0) {
1565                         PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1566                         rte_free(buff);
1567                         return 2;
1568                 }
1569         }
1570         for (i = 0; i < p_list->p_count; i++) {
1571                 p = &p_list->p_info[i];
1572                 if ((pinfo->track_id & group_mask) !=
1573                     (p->track_id & group_mask)) {
1574                         PMD_DRV_LOG(INFO, "Profile of different group exists.");
1575                         rte_free(buff);
1576                         return 3;
1577                 }
1578         }
1579
1580         rte_free(buff);
1581         return 0;
1582 }
1583
/**
 * Load (WR_ADD), remove (WR_DEL) or write-without-tracking (WR_ONLY) a
 * Dynamic Device Personalization (DDP) package on the given port.
 *
 * @param buff  raw package image (header + metadata + profile segments).
 * @param size  size of @buff in bytes.
 * @param op    one of RTE_PMD_I40E_PKG_OP_WR_{ADD,DEL,ONLY}.
 * @return 0 on success; negative errno or an i40e status code on failure.
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;
	static const uint32_t type_mask = 0xff000000;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Minimum plausible package: header + metadata segment + 2 words. */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	/* A valid package carries at least metadata + profile segments. */
	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* force read-only track_id for type 0 */
	if ((track_id & type_mask) == 0)
		track_id = 0;

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	/* Scratch buffer holding the INFO section (header + info record)
	 * used both for the existence check and the AQ list update.
	 */
	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/* ADD requires the profile absent; DEL requires the exact
	 * profile present (is_exist == 1). See i40e_check_profile_info
	 * for the meaning of 1/2/3.
	 */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			if (is_exist == 1)
				PMD_DRV_LOG(ERR, "Profile already exists.");
			else if (is_exist == 2)
				PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
			else if (is_exist == 3)
				PMD_DRV_LOG(ERR, "Profile of different group already exists");
			i40e_update_customized_info(dev, buff, size, op);
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (is_exist != 1) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	/* Read-only (track_id 0) and WR_ONLY writes are not tracked in
	 * the device's loaded-profiles list.
	 */
	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
	    op == RTE_PMD_I40E_PKG_OP_WR_DEL)
		i40e_update_customized_info(dev, buff, size, op);

	rte_free(profile_info_sec);
	return status;
}
1746
1747 /* Get number of tvl records in the section */
1748 static unsigned int
1749 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1750 {
1751         unsigned int i, nb_rec, nb_tlv = 0;
1752         struct i40e_profile_tlv_section_record *tlv;
1753
1754         if (!sec)
1755                 return nb_tlv;
1756
1757         /* get number of records in the section */
1758         nb_rec = sec->section.size /
1759                                 sizeof(struct i40e_profile_tlv_section_record);
1760         for (i = 0; i < nb_rec; ) {
1761                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1762                 i += tlv->len;
1763                 nb_tlv++;
1764         }
1765         return nb_tlv;
1766 }
1767
1768 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1769         uint8_t *info_buff, uint32_t info_size,
1770         enum rte_pmd_i40e_package_info type)
1771 {
1772         uint32_t ret_size;
1773         struct i40e_package_header *pkg_hdr;
1774         struct i40e_generic_seg_header *i40e_seg_hdr;
1775         struct i40e_generic_seg_header *note_seg_hdr;
1776         struct i40e_generic_seg_header *metadata_seg_hdr;
1777
1778         if (!info_buff) {
1779                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1780                 return -EINVAL;
1781         }
1782
1783         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1784                 sizeof(struct i40e_metadata_segment) +
1785                 sizeof(uint32_t) * 2)) {
1786                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1787                 return -EINVAL;
1788         }
1789
1790         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1791         if (pkg_hdr->segment_count < 2) {
1792                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1793                 return -EINVAL;
1794         }
1795
1796         /* Find metadata segment */
1797         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1798                 pkg_hdr);
1799
1800         /* Find global notes segment */
1801         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1802                 pkg_hdr);
1803
1804         /* Find i40e profile segment */
1805         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1806
1807         /* get global header info */
1808         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1809                 struct rte_pmd_i40e_profile_info *info =
1810                         (struct rte_pmd_i40e_profile_info *)info_buff;
1811
1812                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1813                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1814                         return -EINVAL;
1815                 }
1816
1817                 if (!metadata_seg_hdr) {
1818                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1819                         return -EINVAL;
1820                 }
1821
1822                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1823                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1824                 info->track_id =
1825                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1826
1827                 memcpy(info->name,
1828                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1829                         I40E_DDP_NAME_SIZE);
1830                 memcpy(&info->version,
1831                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1832                         sizeof(struct i40e_ddp_version));
1833                 return I40E_SUCCESS;
1834         }
1835
1836         /* get global note size */
1837         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1838                 if (info_size < sizeof(uint32_t)) {
1839                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1840                         return -EINVAL;
1841                 }
1842                 if (note_seg_hdr == NULL)
1843                         ret_size = 0;
1844                 else
1845                         ret_size = note_seg_hdr->size;
1846                 *(uint32_t *)info_buff = ret_size;
1847                 return I40E_SUCCESS;
1848         }
1849
1850         /* get global note */
1851         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1852                 if (note_seg_hdr == NULL)
1853                         return -ENOTSUP;
1854                 if (info_size < note_seg_hdr->size) {
1855                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1856                         return -EINVAL;
1857                 }
1858                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1859                 return I40E_SUCCESS;
1860         }
1861
1862         /* get i40e segment header info */
1863         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1864                 struct rte_pmd_i40e_profile_info *info =
1865                         (struct rte_pmd_i40e_profile_info *)info_buff;
1866
1867                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1868                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1869                         return -EINVAL;
1870                 }
1871
1872                 if (!metadata_seg_hdr) {
1873                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1874                         return -EINVAL;
1875                 }
1876
1877                 if (!i40e_seg_hdr) {
1878                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1879                         return -EINVAL;
1880                 }
1881
1882                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1883                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1884                 info->track_id =
1885                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1886
1887                 memcpy(info->name,
1888                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1889                         I40E_DDP_NAME_SIZE);
1890                 memcpy(&info->version,
1891                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1892                         sizeof(struct i40e_ddp_version));
1893                 return I40E_SUCCESS;
1894         }
1895
1896         /* get number of devices */
1897         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1898                 if (info_size < sizeof(uint32_t)) {
1899                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1900                         return -EINVAL;
1901                 }
1902                 *(uint32_t *)info_buff =
1903                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1904                 return I40E_SUCCESS;
1905         }
1906
1907         /* get list of devices */
1908         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1909                 uint32_t dev_num;
1910                 dev_num =
1911                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1912                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1913                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1914                         return -EINVAL;
1915                 }
1916                 memcpy(info_buff,
1917                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1918                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1919                 return I40E_SUCCESS;
1920         }
1921
1922         /* get number of protocols */
1923         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1924                 struct i40e_profile_section_header *proto;
1925
1926                 if (info_size < sizeof(uint32_t)) {
1927                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1928                         return -EINVAL;
1929                 }
1930                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1931                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1932                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1933                 return I40E_SUCCESS;
1934         }
1935
1936         /* get list of protocols */
1937         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1938                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1939                 struct rte_pmd_i40e_proto_info *pinfo;
1940                 struct i40e_profile_section_header *proto;
1941                 struct i40e_profile_tlv_section_record *tlv;
1942
1943                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1944                 nb_proto_info = info_size /
1945                                         sizeof(struct rte_pmd_i40e_proto_info);
1946                 for (i = 0; i < nb_proto_info; i++) {
1947                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1948                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1949                 }
1950                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1951                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1952                 nb_tlv = i40e_get_tlv_section_size(proto);
1953                 if (nb_tlv == 0)
1954                         return I40E_SUCCESS;
1955                 if (nb_proto_info < nb_tlv) {
1956                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1957                         return -EINVAL;
1958                 }
1959                 /* get number of records in the section */
1960                 nb_rec = proto->section.size /
1961                                 sizeof(struct i40e_profile_tlv_section_record);
1962                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1963                 for (i = j = 0; i < nb_rec; j++) {
1964                         pinfo[j].proto_id = tlv->data[0];
1965                         snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1966                                  (const char *)&tlv->data[1]);
1967                         i += tlv->len;
1968                         tlv = &tlv[tlv->len];
1969                 }
1970                 return I40E_SUCCESS;
1971         }
1972
1973         /* get number of packet classification types */
1974         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1975                 struct i40e_profile_section_header *pctype;
1976
1977                 if (info_size < sizeof(uint32_t)) {
1978                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1979                         return -EINVAL;
1980                 }
1981                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1982                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1983                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1984                 return I40E_SUCCESS;
1985         }
1986
1987         /* get list of packet classification types */
1988         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1989                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1990                 struct rte_pmd_i40e_ptype_info *pinfo;
1991                 struct i40e_profile_section_header *pctype;
1992                 struct i40e_profile_tlv_section_record *tlv;
1993
1994                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1995                 nb_proto_info = info_size /
1996                                         sizeof(struct rte_pmd_i40e_ptype_info);
1997                 for (i = 0; i < nb_proto_info; i++)
1998                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1999                                sizeof(struct rte_pmd_i40e_ptype_info));
2000                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2001                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2002                 nb_tlv = i40e_get_tlv_section_size(pctype);
2003                 if (nb_tlv == 0)
2004                         return I40E_SUCCESS;
2005                 if (nb_proto_info < nb_tlv) {
2006                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2007                         return -EINVAL;
2008                 }
2009
2010                 /* get number of records in the section */
2011                 nb_rec = pctype->section.size /
2012                                 sizeof(struct i40e_profile_tlv_section_record);
2013                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2014                 for (i = j = 0; i < nb_rec; j++) {
2015                         memcpy(&pinfo[j], tlv->data,
2016                                sizeof(struct rte_pmd_i40e_ptype_info));
2017                         i += tlv->len;
2018                         tlv = &tlv[tlv->len];
2019                 }
2020                 return I40E_SUCCESS;
2021         }
2022
2023         /* get number of packet types */
2024         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2025                 struct i40e_profile_section_header *ptype;
2026
2027                 if (info_size < sizeof(uint32_t)) {
2028                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2029                         return -EINVAL;
2030                 }
2031                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2032                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2033                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2034                 return I40E_SUCCESS;
2035         }
2036
2037         /* get list of packet types */
2038         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2039                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2040                 struct rte_pmd_i40e_ptype_info *pinfo;
2041                 struct i40e_profile_section_header *ptype;
2042                 struct i40e_profile_tlv_section_record *tlv;
2043
2044                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2045                 nb_proto_info = info_size /
2046                                         sizeof(struct rte_pmd_i40e_ptype_info);
2047                 for (i = 0; i < nb_proto_info; i++)
2048                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2049                                sizeof(struct rte_pmd_i40e_ptype_info));
2050                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2051                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2052                 nb_tlv = i40e_get_tlv_section_size(ptype);
2053                 if (nb_tlv == 0)
2054                         return I40E_SUCCESS;
2055                 if (nb_proto_info < nb_tlv) {
2056                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2057                         return -EINVAL;
2058                 }
2059                 /* get number of records in the section */
2060                 nb_rec = ptype->section.size /
2061                                 sizeof(struct i40e_profile_tlv_section_record);
2062                 for (i = j = 0; i < nb_rec; j++) {
2063                         tlv = (struct i40e_profile_tlv_section_record *)
2064                                                                 &ptype[1 + i];
2065                         memcpy(&pinfo[j], tlv->data,
2066                                sizeof(struct rte_pmd_i40e_ptype_info));
2067                         i += tlv->len;
2068                 }
2069                 return I40E_SUCCESS;
2070         }
2071
2072         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2073         return -EINVAL;
2074 }
2075
2076 int
2077 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2078 {
2079         struct rte_eth_dev *dev;
2080         struct i40e_hw *hw;
2081         enum i40e_status_code status = I40E_SUCCESS;
2082
2083         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2084
2085         dev = &rte_eth_devices[port];
2086
2087         if (!is_i40e_supported(dev))
2088                 return -ENOTSUP;
2089
2090         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2091                 return -EINVAL;
2092
2093         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094
2095         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2096                                       size, 0, NULL);
2097
2098         return status;
2099 }
2100
2101 static int check_invalid_pkt_type(uint32_t pkt_type)
2102 {
2103         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2104
2105         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2106         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2107         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2108         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2109         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2110         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2111         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2112
2113         if (l2 &&
2114             l2 != RTE_PTYPE_L2_ETHER &&
2115             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2116             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2117             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2118             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2119             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2120             l2 != RTE_PTYPE_L2_ETHER_QINQ)
2121                 return -1;
2122
2123         if (l3 &&
2124             l3 != RTE_PTYPE_L3_IPV4 &&
2125             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2126             l3 != RTE_PTYPE_L3_IPV6 &&
2127             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2128             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2129             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2130                 return -1;
2131
2132         if (l4 &&
2133             l4 != RTE_PTYPE_L4_TCP &&
2134             l4 != RTE_PTYPE_L4_UDP &&
2135             l4 != RTE_PTYPE_L4_FRAG &&
2136             l4 != RTE_PTYPE_L4_SCTP &&
2137             l4 != RTE_PTYPE_L4_ICMP &&
2138             l4 != RTE_PTYPE_L4_NONFRAG)
2139                 return -1;
2140
2141         if (tnl &&
2142             tnl != RTE_PTYPE_TUNNEL_IP &&
2143             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2144             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2145             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2146             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2147             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2148             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2149             tnl != RTE_PTYPE_TUNNEL_GTPU)
2150                 return -1;
2151
2152         if (il2 &&
2153             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2154             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2155             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2156                 return -1;
2157
2158         if (il3 &&
2159             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2160             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2161             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2162             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2163             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2164             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2165                 return -1;
2166
2167         if (il4 &&
2168             il4 != RTE_PTYPE_INNER_L4_TCP &&
2169             il4 != RTE_PTYPE_INNER_L4_UDP &&
2170             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2171             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2172             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2173             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2174                 return -1;
2175
2176         return 0;
2177 }
2178
2179 static int check_invalid_ptype_mapping(
2180                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2181                 uint16_t count)
2182 {
2183         int i;
2184
2185         for (i = 0; i < count; i++) {
2186                 uint16_t ptype = mapping_table[i].hw_ptype;
2187                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2188
2189                 if (ptype >= I40E_MAX_PKT_TYPE)
2190                         return -1;
2191
2192                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2193                         continue;
2194
2195                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2196                         continue;
2197
2198                 if (check_invalid_pkt_type(pkt_type))
2199                         return -1;
2200         }
2201
2202         return 0;
2203 }
2204
2205 int
2206 rte_pmd_i40e_ptype_mapping_update(
2207                         uint16_t port,
2208                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2209                         uint16_t count,
2210                         uint8_t exclusive)
2211 {
2212         struct rte_eth_dev *dev;
2213         struct i40e_adapter *ad;
2214         int i;
2215
2216         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2217
2218         dev = &rte_eth_devices[port];
2219
2220         if (!is_i40e_supported(dev))
2221                 return -ENOTSUP;
2222
2223         if (count > I40E_MAX_PKT_TYPE)
2224                 return -EINVAL;
2225
2226         if (check_invalid_ptype_mapping(mapping_items, count))
2227                 return -EINVAL;
2228
2229         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2230
2231         if (exclusive) {
2232                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2233                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2234         }
2235
2236         for (i = 0; i < count; i++)
2237                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2238                         = mapping_items[i].sw_ptype;
2239
2240         return 0;
2241 }
2242
2243 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2244 {
2245         struct rte_eth_dev *dev;
2246
2247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2248
2249         dev = &rte_eth_devices[port];
2250
2251         if (!is_i40e_supported(dev))
2252                 return -ENOTSUP;
2253
2254         i40e_set_default_ptype_table(dev);
2255
2256         return 0;
2257 }
2258
2259 int rte_pmd_i40e_ptype_mapping_get(
2260                         uint16_t port,
2261                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2262                         uint16_t size,
2263                         uint16_t *count,
2264                         uint8_t valid_only)
2265 {
2266         struct rte_eth_dev *dev;
2267         struct i40e_adapter *ad;
2268         int n = 0;
2269         uint16_t i;
2270
2271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2272
2273         dev = &rte_eth_devices[port];
2274
2275         if (!is_i40e_supported(dev))
2276                 return -ENOTSUP;
2277
2278         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2279
2280         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2281                 if (n >= size)
2282                         break;
2283                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2284                         continue;
2285                 mapping_items[n].hw_ptype = i;
2286                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2287                 n++;
2288         }
2289
2290         *count = n;
2291         return 0;
2292 }
2293
2294 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2295                                        uint32_t target,
2296                                        uint8_t mask,
2297                                        uint32_t pkt_type)
2298 {
2299         struct rte_eth_dev *dev;
2300         struct i40e_adapter *ad;
2301         uint16_t i;
2302
2303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2304
2305         dev = &rte_eth_devices[port];
2306
2307         if (!is_i40e_supported(dev))
2308                 return -ENOTSUP;
2309
2310         if (!mask && check_invalid_pkt_type(target))
2311                 return -EINVAL;
2312
2313         if (check_invalid_pkt_type(pkt_type))
2314                 return -EINVAL;
2315
2316         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2317
2318         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2319                 if (mask) {
2320                         if ((target | ad->ptype_tbl[i]) == target &&
2321                             (target & ad->ptype_tbl[i]))
2322                                 ad->ptype_tbl[i] = pkt_type;
2323                 } else {
2324                         if (ad->ptype_tbl[i] == target)
2325                                 ad->ptype_tbl[i] = pkt_type;
2326                 }
2327         }
2328
2329         return 0;
2330 }
2331
2332 int
2333 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2334                              struct ether_addr *mac_addr)
2335 {
2336         struct rte_eth_dev *dev;
2337         struct i40e_pf_vf *vf;
2338         struct i40e_vsi *vsi;
2339         struct i40e_pf *pf;
2340         struct i40e_mac_filter_info mac_filter;
2341         int ret;
2342
2343         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2344                 return -EINVAL;
2345
2346         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2347
2348         dev = &rte_eth_devices[port];
2349
2350         if (!is_i40e_supported(dev))
2351                 return -ENOTSUP;
2352
2353         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2354
2355         if (vf_id >= pf->vf_num || !pf->vfs)
2356                 return -EINVAL;
2357
2358         vf = &pf->vfs[vf_id];
2359         vsi = vf->vsi;
2360         if (!vsi) {
2361                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2362                 return -EINVAL;
2363         }
2364
2365         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2366         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2367         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2368         if (ret != I40E_SUCCESS) {
2369                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2370                 return -1;
2371         }
2372
2373         return 0;
2374 }
2375
2376 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2377 {
2378         struct rte_eth_dev *dev;
2379
2380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2381
2382         dev = &rte_eth_devices[port];
2383
2384         if (!is_i40e_supported(dev))
2385                 return -ENOTSUP;
2386
2387         i40e_set_default_pctype_table(dev);
2388
2389         return 0;
2390 }
2391
2392 int rte_pmd_i40e_flow_type_mapping_get(
2393                         uint16_t port,
2394                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2395 {
2396         struct rte_eth_dev *dev;
2397         struct i40e_adapter *ad;
2398         uint16_t i;
2399
2400         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2401
2402         dev = &rte_eth_devices[port];
2403
2404         if (!is_i40e_supported(dev))
2405                 return -ENOTSUP;
2406
2407         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2408
2409         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2410                 mapping_items[i].flow_type = i;
2411                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2412         }
2413
2414         return 0;
2415 }
2416
2417 int
2418 rte_pmd_i40e_flow_type_mapping_update(
2419                         uint16_t port,
2420                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2421                         uint16_t count,
2422                         uint8_t exclusive)
2423 {
2424         struct rte_eth_dev *dev;
2425         struct i40e_adapter *ad;
2426         int i;
2427
2428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2429
2430         dev = &rte_eth_devices[port];
2431
2432         if (!is_i40e_supported(dev))
2433                 return -ENOTSUP;
2434
2435         if (count > I40E_FLOW_TYPE_MAX)
2436                 return -EINVAL;
2437
2438         for (i = 0; i < count; i++)
2439                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2440                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2441                     (mapping_items[i].pctype &
2442                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2443                         return -EINVAL;
2444
2445         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2446
2447         if (exclusive) {
2448                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2449                         ad->pctypes_tbl[i] = 0ULL;
2450                 ad->flow_types_mask = 0ULL;
2451         }
2452
2453         for (i = 0; i < count; i++) {
2454                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2455                                                 mapping_items[i].pctype;
2456                 if (mapping_items[i].pctype)
2457                         ad->flow_types_mask |=
2458                                         (1ULL << mapping_items[i].flow_type);
2459                 else
2460                         ad->flow_types_mask &=
2461                                         ~(1ULL << mapping_items[i].flow_type);
2462         }
2463
2464         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2465                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2466
2467         return 0;
2468 }
2469
2470 int
2471 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2472 {
2473         struct rte_eth_dev *dev;
2474         struct ether_addr *mac;
2475         struct i40e_pf *pf;
2476         int vf_id;
2477         struct i40e_pf_vf *vf;
2478         uint16_t vf_num;
2479
2480         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2481         dev = &rte_eth_devices[port];
2482
2483         if (!is_i40e_supported(dev))
2484                 return -ENOTSUP;
2485
2486         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2487         vf_num = pf->vf_num;
2488
2489         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2490                 vf = &pf->vfs[vf_id];
2491                 mac = &vf->mac_addr;
2492
2493                 if (is_same_ether_addr(mac, vf_mac))
2494                         return vf_id;
2495         }
2496
2497         return -EINVAL;
2498 }
2499
/**
 * Program the stored queue regions into the main VSI's TC/queue
 * mapping through the AQ "update VSI parameters" command, then
 * mirror the accepted mapping back into the local VSI info.
 *
 * Returns 0 on success, -EINVAL if no region has been configured,
 * or the status from i40e_aq_update_vsi_params().
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
                              struct i40e_pf *pf)
{
        uint16_t i;
        struct i40e_vsi *vsi = pf->main_vsi;
        uint16_t queue_offset, bsf, tc_index;
        struct i40e_vsi_context ctxt;
        struct i40e_aqc_vsi_properties_data *vsi_info;
        struct i40e_queue_regions *region_info =
                                &pf->queue_region;
        int32_t ret = -EINVAL;

        if (!region_info->queue_region_number) {
                PMD_INIT_LOG(ERR, "there is no that region id been set before");
                return ret;
        }

        memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

        /* Update Queue Pairs Mapping for currently enabled UPs */
        ctxt.seid = vsi->seid;
        ctxt.pf_num = hw->pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = vsi->uplink_seid;
        ctxt.info = vsi->info;
        vsi_info = &ctxt.info;

        /* Start from a clean TC/queue mapping before rebuilding it. */
        memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
        memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

        /* Configure queue region and queue mapping parameters,
         * for enabled queue region, allocate queues to this region.
         */

        for (i = 0; i < region_info->queue_region_number; i++) {
                /* Each region becomes one TC: encode its first queue and
                 * log2(queue count) into that TC's mapping word.
                 */
                tc_index = region_info->region[i].region_id;
                bsf = rte_bsf32(region_info->region[i].queue_num);
                queue_offset = region_info->region[i].queue_start_index;
                vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
                        (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                                (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
        }

        /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
        vsi_info->mapping_flags |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
        vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        vsi_info->valid_sections |=
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

        /* Update the VSI after updating the VSI queue-mapping information */
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
                                hw->aq.asq_last_status);
                return ret;
        }
        /* update the local VSI info with updated queue map */
        rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
                                        sizeof(vsi->info.tc_mapping));
        rte_memcpy(&vsi->info.queue_mapping,
                        &ctxt.info.queue_mapping,
                        sizeof(vsi->info.queue_mapping));
        vsi->info.mapping_flags = ctxt.info.mapping_flags;
        /* Mapping was just committed; nothing is pending validation. */
        vsi->info.valid_sections = 0;

        return 0;
}
2569
2570
2571 static int
2572 i40e_queue_region_set_region(struct i40e_pf *pf,
2573                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2574 {
2575         uint16_t i;
2576         struct i40e_vsi *main_vsi = pf->main_vsi;
2577         struct i40e_queue_regions *info = &pf->queue_region;
2578         int32_t ret = -EINVAL;
2579
2580         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2581                                 conf_ptr->queue_num <= 64)) {
2582                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2583                         "total number of queues do not exceed the VSI allocation");
2584                 return ret;
2585         }
2586
2587         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2588                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2589                 return ret;
2590         }
2591
2592         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2593                                         > main_vsi->nb_used_qps) {
2594                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2595                 return ret;
2596         }
2597
2598         for (i = 0; i < info->queue_region_number; i++)
2599                 if (conf_ptr->region_id == info->region[i].region_id)
2600                         break;
2601
2602         if (i == info->queue_region_number &&
2603                                 i <= I40E_REGION_MAX_INDEX) {
2604                 info->region[i].region_id = conf_ptr->region_id;
2605                 info->region[i].queue_num = conf_ptr->queue_num;
2606                 info->region[i].queue_start_index =
2607                         conf_ptr->queue_start_index;
2608                 info->queue_region_number++;
2609         } else {
2610                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2611                 return ret;
2612         }
2613
2614         return 0;
2615 }
2616
2617 static int
2618 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2619                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2620 {
2621         int32_t ret = -EINVAL;
2622         struct i40e_queue_regions *info = &pf->queue_region;
2623         uint16_t i, j;
2624         uint16_t region_index, flowtype_index;
2625
2626         /* For the pctype or hardware flowtype of packet,
2627          * the specific index for each type has been defined
2628          * in file i40e_type.h as enum i40e_filter_pctype.
2629          */
2630
2631         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2632                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2633                 return ret;
2634         }
2635
2636         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2637                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2638                 return ret;
2639         }
2640
2641
2642         for (i = 0; i < info->queue_region_number; i++)
2643                 if (rss_region_conf->region_id == info->region[i].region_id)
2644                         break;
2645
2646         if (i == info->queue_region_number) {
2647                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2648                 ret = -EINVAL;
2649                 return ret;
2650         }
2651         region_index = i;
2652
2653         for (i = 0; i < info->queue_region_number; i++) {
2654                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2655                         if (rss_region_conf->hw_flowtype ==
2656                                 info->region[i].hw_flowtype[j]) {
2657                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2658                                 return 0;
2659                         }
2660                 }
2661         }
2662
2663         flowtype_index = info->region[region_index].flowtype_num;
2664         info->region[region_index].hw_flowtype[flowtype_index] =
2665                                         rss_region_conf->hw_flowtype;
2666         info->region[region_index].flowtype_num++;
2667
2668         return 0;
2669 }
2670
2671 static void
2672 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2673                                 struct i40e_pf *pf)
2674 {
2675         uint8_t hw_flowtype;
2676         uint32_t pfqf_hregion;
2677         uint16_t i, j, index;
2678         struct i40e_queue_regions *info = &pf->queue_region;
2679
2680         /* For the pctype or hardware flowtype of packet,
2681          * the specific index for each type has been defined
2682          * in file i40e_type.h as enum i40e_filter_pctype.
2683          */
2684
2685         for (i = 0; i < info->queue_region_number; i++) {
2686                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2687                         hw_flowtype = info->region[i].hw_flowtype[j];
2688                         index = hw_flowtype >> 3;
2689                         pfqf_hregion =
2690                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2691
2692                         if ((hw_flowtype & 0x7) == 0) {
2693                                 pfqf_hregion |= info->region[i].region_id <<
2694                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2695                                 pfqf_hregion |= 1 <<
2696                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2697                         } else if ((hw_flowtype & 0x7) == 1) {
2698                                 pfqf_hregion |= info->region[i].region_id  <<
2699                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2700                                 pfqf_hregion |= 1 <<
2701                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2702                         } else if ((hw_flowtype & 0x7) == 2) {
2703                                 pfqf_hregion |= info->region[i].region_id  <<
2704                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2705                                 pfqf_hregion |= 1 <<
2706                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2707                         } else if ((hw_flowtype & 0x7) == 3) {
2708                                 pfqf_hregion |= info->region[i].region_id  <<
2709                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2710                                 pfqf_hregion |= 1 <<
2711                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2712                         } else if ((hw_flowtype & 0x7) == 4) {
2713                                 pfqf_hregion |= info->region[i].region_id  <<
2714                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2715                                 pfqf_hregion |= 1 <<
2716                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2717                         } else if ((hw_flowtype & 0x7) == 5) {
2718                                 pfqf_hregion |= info->region[i].region_id  <<
2719                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2720                                 pfqf_hregion |= 1 <<
2721                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2722                         } else if ((hw_flowtype & 0x7) == 6) {
2723                                 pfqf_hregion |= info->region[i].region_id  <<
2724                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2725                                 pfqf_hregion |= 1 <<
2726                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2727                         } else {
2728                                 pfqf_hregion |= info->region[i].region_id  <<
2729                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2730                                 pfqf_hregion |= 1 <<
2731                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2732                         }
2733
2734                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2735                                                 pfqf_hregion);
2736                 }
2737         }
2738 }
2739
2740 static int
2741 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2742                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2743 {
2744         struct i40e_queue_regions *info = &pf->queue_region;
2745         int32_t ret = -EINVAL;
2746         uint16_t i, j, region_index;
2747
2748         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2749                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2750                 return ret;
2751         }
2752
2753         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2754                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2755                 return ret;
2756         }
2757
2758         for (i = 0; i < info->queue_region_number; i++)
2759                 if (rss_region_conf->region_id == info->region[i].region_id)
2760                         break;
2761
2762         if (i == info->queue_region_number) {
2763                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2764                 ret = -EINVAL;
2765                 return ret;
2766         }
2767
2768         region_index = i;
2769
2770         for (i = 0; i < info->queue_region_number; i++) {
2771                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2772                         if (info->region[i].user_priority[j] ==
2773                                 rss_region_conf->user_priority) {
2774                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2775                                 return 0;
2776                         }
2777                 }
2778         }
2779
2780         j = info->region[region_index].user_priority_num;
2781         info->region[region_index].user_priority[j] =
2782                                         rss_region_conf->user_priority;
2783         info->region[region_index].user_priority_num++;
2784
2785         return 0;
2786 }
2787
/**
 * Build and apply a DCB configuration that backs each queue region
 * with its own traffic class: bandwidth is split evenly across the
 * regions (the remainder handed out one percent at a time), every TC
 * uses ETS, and each region's user priorities are mapped to its TC.
 *
 * Returns 0 on success, -EINVAL if no region has been configured, or
 * the status from i40e_set_dcb_config().
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
                                struct i40e_pf *pf)
{
        struct i40e_dcbx_config dcb_cfg_local;
        struct i40e_dcbx_config *dcb_cfg;
        struct i40e_queue_regions *info = &pf->queue_region;
        struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
        int32_t ret = -EINVAL;
        uint16_t i, j, prio_index, region_index;
        uint8_t tc_map, tc_bw, bw_lf;

        if (!info->queue_region_number) {
                PMD_DRV_LOG(ERR, "No queue region been set before");
                return ret;
        }

        dcb_cfg = &dcb_cfg_local;
        memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

        /* assume each tc has the same bw */
        tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
        for (i = 0; i < info->queue_region_number; i++)
                dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
        /* to ensure the sum of tcbw is equal to 100 */
        bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
        for (i = 0; i < bw_lf; i++)
                dcb_cfg->etscfg.tcbwtable[i]++;

        /* assume each tc has the same Transmission Selection Algorithm */
        for (i = 0; i < info->queue_region_number; i++)
                dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

        /* Point every user priority of each region at that region's TC. */
        for (i = 0; i < info->queue_region_number; i++) {
                for (j = 0; j < info->region[i].user_priority_num; j++) {
                        prio_index = info->region[i].user_priority[j];
                        region_index = info->region[i].region_id;
                        dcb_cfg->etscfg.prioritytable[prio_index] =
                                                region_index;
                }
        }

        /* FW needs one App to configure HW */
        dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
        dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
        dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
        dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

        /* One mask bit per configured region/TC. */
        tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

        dcb_cfg->pfc.willing = 0;
        dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
        dcb_cfg->pfc.pfcenable = tc_map;

        /* Copy the new config to the current config */
        *old_cfg = *dcb_cfg;
        /* i40e_set_dcb_config() programs HW from hw->local_dcbx_config. */
        old_cfg->etsrec = old_cfg->etscfg;
        ret = i40e_set_dcb_config(hw);

        if (ret) {
                PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
                return ret;
        }

        return 0;
}
2856
/**
 * Commit or tear down all stored queue region configuration.
 *
 * When @on is non-zero, flush the software-stored
 * region/flowtype/priority configuration into hardware: HREGION
 * registers, VSI queue mapping, then DCB. When @on is zero, collapse
 * the stored state back to a single region covering all used queues,
 * restore the driver's default DCB configuration, and reset the
 * stored region table.
 *
 * Returns 0 on success, a negative/AQ status on failure.
 */
int
i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
        struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
{
        int32_t ret = -EINVAL;
        struct i40e_queue_regions *info = &pf->queue_region;
        struct i40e_vsi *main_vsi = pf->main_vsi;

        if (on) {
                /* Order matters: flowtype registers first, then the VSI
                 * queue mapping, then DCB.
                 */
                i40e_queue_region_pf_flowtype_conf(hw, pf);

                ret = i40e_vsi_update_queue_region_mapping(hw, pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
                        return ret;
                }

                ret = i40e_queue_region_dcb_configure(hw, pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(INFO, "Failed to flush dcb.");
                        return ret;
                }

                return 0;
        }

        if (info->queue_region_number) {
                /* Collapse to one region spanning every used queue before
                 * restoring the default mapping and DCB configuration.
                 */
                info->queue_region_number = 1;
                info->region[0].queue_num = main_vsi->nb_used_qps;
                info->region[0].queue_start_index = 0;

                ret = i40e_vsi_update_queue_region_mapping(hw, pf);
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");

                ret = i40e_dcb_init_configure(dev, TRUE);
                if (ret != I40E_SUCCESS) {
                        /* DCB restore failed: drop the DCB capability flag. */
                        PMD_DRV_LOG(INFO, "Failed to flush dcb.");
                        pf->flags &= ~I40E_FLAG_DCB;
                }

                /* Reset the stored region configuration to defaults. */
                i40e_init_queue_region_conf(dev);
        }
        return 0;
}
2902
2903 static int
2904 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2905 {
2906         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2907         uint64_t hena;
2908
2909         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2910         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2911
2912         if (!hena)
2913                 return -ENOTSUP;
2914
2915         return 0;
2916 }
2917
2918 static int
2919 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2920                 struct i40e_queue_regions *regions_ptr)
2921 {
2922         struct i40e_queue_regions *info = &pf->queue_region;
2923
2924         rte_memcpy(regions_ptr, info,
2925                         sizeof(struct i40e_queue_regions));
2926
2927         return 0;
2928 }
2929
2930 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2931                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2932 {
2933         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2934         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2935         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2936         int32_t ret;
2937
2938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2939
2940         if (!is_i40e_supported(dev))
2941                 return -ENOTSUP;
2942
2943         if (!(!i40e_queue_region_pf_check_rss(pf)))
2944                 return -ENOTSUP;
2945
2946         /* This queue region feature only support pf by now. It should
2947          * be called after dev_start, and will be clear after dev_stop.
2948          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2949          * is just an enable function which server for other configuration,
2950          * it is for all configuration about queue region from up layer,
2951          * at first will only keep in DPDK softwarestored in driver,
2952          * only after "FLUSH_ON", it commit all configuration to HW.
2953          * Because PMD had to set hardware configuration at a time, so
2954          * it will record all up layer command at first.
2955          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2956          * just clean all configuration about queue region just now,
2957          * and restore all to DPDK i40e driver default
2958          * config when start up.
2959          */
2960
2961         switch (op_type) {
2962         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2963                 ret = i40e_queue_region_set_region(pf,
2964                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2965                 break;
2966         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2967                 ret = i40e_queue_region_set_flowtype(pf,
2968                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2969                 break;
2970         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2971                 ret = i40e_queue_region_set_user_priority(pf,
2972                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2973                 break;
2974         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2975                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2976                 break;
2977         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2978                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2979                 break;
2980         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2981                 ret = i40e_queue_region_get_all_info(pf,
2982                                 (struct i40e_queue_regions *)arg);
2983                 break;
2984         default:
2985                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2986                             op_type);
2987                 ret = -EINVAL;
2988         }
2989
2990         I40E_WRITE_FLUSH(hw);
2991
2992         return ret;
2993 }
2994
2995 int rte_pmd_i40e_flow_add_del_packet_template(
2996                         uint16_t port,
2997                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2998                         uint8_t add)
2999 {
3000         struct rte_eth_dev *dev = &rte_eth_devices[port];
3001         struct i40e_fdir_filter_conf filter_conf;
3002
3003         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3004
3005         if (!is_i40e_supported(dev))
3006                 return -ENOTSUP;
3007
3008         memset(&filter_conf, 0, sizeof(filter_conf));
3009         filter_conf.soft_id = conf->soft_id;
3010         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3011         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3012         filter_conf.input.flow.raw_flow.length = conf->input.length;
3013         filter_conf.input.flow_ext.pkt_template = true;
3014
3015         filter_conf.action.rx_queue = conf->action.rx_queue;
3016         filter_conf.action.behavior =
3017                 (enum i40e_fdir_behavior)conf->action.behavior;
3018         filter_conf.action.report_status =
3019                 (enum i40e_fdir_status)conf->action.report_status;
3020         filter_conf.action.flex_off = conf->action.flex_off;
3021
3022         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3023 }