/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_tailq.h>

#include "base/i40e_prototype.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"

int
rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        i40e_notify_vf_link_status(dev, &pf->vfs[vf]);

        return 0;
}
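
/*
 * Illustrative usage (editor's sketch, not part of the driver): notify
 * every VF on a port of the current link status. port_id and nb_vfs are
 * hypothetical values the application would already know.
 *
 *      uint16_t vf;
 *
 *      for (vf = 0; vf < nb_vfs; vf++)
 *              if (rte_pmd_i40e_ping_vfs(port_id, vf) != 0)
 *                      printf("ping of VF %u failed\n", vf);
 */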

int
rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_vsi_context ctxt;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        /* Check whether it is already on or off */
        if (vsi->info.valid_sections &
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
                if (on) {
                        if ((vsi->info.sec_flags &
                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
                            I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
                                return 0; /* already on */
                } else {
                        if ((vsi->info.sec_flags &
                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
                                return 0; /* already off */
                }
        }

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (on)
                vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
        else
                vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        hw = I40E_VSI_TO_HW(vsi);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to update VSI params");
        }

        return ret;
}
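
/*
 * Illustrative usage (editor's sketch, not part of the driver): turn MAC
 * anti-spoofing on for one VF from the host. port_id and vf_id are
 * hypothetical values chosen by the application.
 *
 *      int ret = rte_pmd_i40e_set_vf_mac_anti_spoof(port_id, vf_id, 1);
 *
 *      if (ret != 0)
 *              printf("enabling MAC anti-spoof failed: %d\n", ret);
 */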

static int
i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
{
        uint32_t j, k;
        uint16_t vlan_id;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
        int ret;

        for (j = 0; j < I40E_VFTA_SIZE; j++) {
                if (!vsi->vfta[j])
                        continue;

                for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
                        if (!(vsi->vfta[j] & (1 << k)))
                                continue;

                        vlan_id = j * I40E_UINT32_BIT_SIZE + k;
                        if (!vlan_id)
                                continue;

                        vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
                        if (add)
                                ret = i40e_aq_add_vlan(hw, vsi->seid,
                                                       &vlan_data, 1, NULL);
                        else
                                ret = i40e_aq_remove_vlan(hw, vsi->seid,
                                                          &vlan_data, 1, NULL);
                        if (ret != I40E_SUCCESS) {
                                PMD_DRV_LOG(ERR,
                                            "Failed to add/rm vlan filter");
                                return ret;
                        }
                }
        }

        return I40E_SUCCESS;
}

int
rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_vsi_context ctxt;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        /* Check whether it is already on or off */
        if (vsi->vlan_anti_spoof_on == on)
                return 0; /* already on or off */

        vsi->vlan_anti_spoof_on = on;
        if (!vsi->vlan_filter_on) {
                ret = i40e_add_rm_all_vlan_filter(vsi, on);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
                        return -ENOTSUP;
                }
        }

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (on)
                vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
        else
                vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;

        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        hw = I40E_VSI_TO_HW(vsi);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to update VSI params");
        }

        return ret;
}
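
/*
 * Illustrative usage (editor's sketch): VLAN anti-spoofing is toggled the
 * same way as the MAC variant; a negative return means the arguments were
 * bad or the admin-queue update failed.
 *
 *      if (rte_pmd_i40e_set_vf_vlan_anti_spoof(port_id, vf_id, 1) != 0)
 *              printf("enabling VLAN anti-spoof failed\n");
 */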

static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;
        struct i40e_macvlan_filter *mv_f;
        int i, vlan_num;
        enum rte_mac_filter_type filter_type;
        int ret = I40E_SUCCESS;
        void *temp;

        /* remove all the MACs */
        TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
                vlan_num = vsi->vlan_num;
                filter_type = f->mac_info.filter_type;
                if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
                    filter_type == RTE_MACVLAN_HASH_MATCH) {
                        if (vlan_num == 0) {
                                PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
                                return I40E_ERR_PARAM;
                        }
                } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
                           filter_type == RTE_MAC_HASH_MATCH)
                        vlan_num = 1;

                mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
                if (!mv_f) {
                        PMD_DRV_LOG(ERR, "failed to allocate memory");
                        return I40E_ERR_NO_MEMORY;
                }

                for (i = 0; i < vlan_num; i++) {
                        mv_f[i].filter_type = filter_type;
                        rte_memcpy(&mv_f[i].macaddr,
                                         &f->mac_info.mac_addr,
                                         ETH_ADDR_LEN);
                }
                if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
                    filter_type == RTE_MACVLAN_HASH_MATCH) {
                        ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
                                                         &f->mac_info.mac_addr);
                        if (ret != I40E_SUCCESS) {
                                rte_free(mv_f);
                                return ret;
                        }
                }

                ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
                if (ret != I40E_SUCCESS) {
                        rte_free(mv_f);
                        return ret;
                }

                rte_free(mv_f);
                ret = I40E_SUCCESS;
        }

        return ret;
}

static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;
        struct i40e_macvlan_filter *mv_f;
        int i, vlan_num = 0;
        int ret = I40E_SUCCESS;
        void *temp;

        /* restore all the MACs */
        TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
                if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
                    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
                        /**
                         * If vlan_num is 0, this is the first time to add a
                         * MAC; set the mask for vlan_id 0.
                         */
                        if (vsi->vlan_num == 0) {
                                i40e_set_vlan_filter(vsi, 0, 1);
                                vsi->vlan_num = 1;
                        }
                        vlan_num = vsi->vlan_num;
                } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
                           (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
                        vlan_num = 1;

                mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
                if (!mv_f) {
                        PMD_DRV_LOG(ERR, "failed to allocate memory");
                        return I40E_ERR_NO_MEMORY;
                }

                for (i = 0; i < vlan_num; i++) {
                        mv_f[i].filter_type = f->mac_info.filter_type;
                        rte_memcpy(&mv_f[i].macaddr,
                                         &f->mac_info.mac_addr,
                                         ETH_ADDR_LEN);
                }

                if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
                    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
                        ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
                                                         &f->mac_info.mac_addr);
                        if (ret != I40E_SUCCESS) {
                                rte_free(mv_f);
                                return ret;
                        }
                }

                ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
                if (ret != I40E_SUCCESS) {
                        rte_free(mv_f);
                        return ret;
                }

                rte_free(mv_f);
                ret = I40E_SUCCESS;
        }

        return ret;
}

static int
i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
{
        struct i40e_vsi_context ctxt;
        struct i40e_hw *hw;
        int ret;

        if (!vsi)
                return -EINVAL;

        hw = I40E_VSI_TO_HW(vsi);

        /* Use the FW API if FW >= v5.0 */
        if (hw->aq.fw_maj_ver < 5) {
                PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
                return -ENOTSUP;
        }

        /* Check whether it is already on or off */
        if (vsi->info.valid_sections &
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
                if (on) {
                        if ((vsi->info.switch_id &
                             I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
                            I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
                                return 0; /* already on */
                } else {
                        if ((vsi->info.switch_id &
                             I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
                                return 0; /* already off */
                }
        }

        /* remove all the MAC and VLAN filters first */
        ret = i40e_vsi_rm_mac_filter(vsi);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
                return ret;
        }
        if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
                ret = i40e_add_rm_all_vlan_filter(vsi, 0);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
                        return ret;
                }
        }

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        if (on)
                vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
        else
                vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;

        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to update VSI params");
                return ret;
        }

        /* add all the MAC and VLAN filters back */
        ret = i40e_vsi_restore_mac_filter(vsi);
        if (ret)
                return ret;
        if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
                ret = i40e_add_rm_all_vlan_filter(vsi, 1);
                if (ret)
                        return ret;
        }

        return ret;
}

int
rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_pf_vf *vf;
        struct i40e_vsi *vsi;
        uint16_t vf_id;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        /* setup PF TX loopback */
        vsi = pf->main_vsi;
        ret = i40e_vsi_set_tx_loopback(vsi, on);
        if (ret)
                return -ENOTSUP;

        /* setup TX loopback for all the VFs */
        if (!pf->vfs) {
                /* if no VF, do nothing. */
                return 0;
        }

        for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
                vf = &pf->vfs[vf_id];
                vsi = vf->vsi;

                ret = i40e_vsi_set_tx_loopback(vsi, on);
                if (ret)
                        return -ENOTSUP;
        }

        return ret;
}
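
/*
 * Illustrative usage (editor's sketch): TX loopback is a per-port switch
 * covering the PF and every VF VSI, so one call configures them all.
 *
 *      if (rte_pmd_i40e_set_tx_loopback(port_id, 1) != 0)
 *              printf("could not enable TX loopback\n");
 */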

int
rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        hw = I40E_VSI_TO_HW(vsi);

        ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
                                                  on, NULL, true);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
        }

        return ret;
}

int
rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        hw = I40E_VSI_TO_HW(vsi);

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                    on, NULL);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
        }

        return ret;
}
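
/*
 * Illustrative usage (editor's sketch): the two promiscuous-mode helpers
 * are independent; enabling both lets the VF see all unicast and all
 * multicast traffic on its VSI.
 *
 *      rte_pmd_i40e_set_vf_unicast_promisc(port_id, vf_id, 1);
 *      rte_pmd_i40e_set_vf_multicast_promisc(port_id, vf_id, 1);
 */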

int
rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
                             struct ether_addr *mac_addr)
{
        struct i40e_mac_filter *f;
        struct rte_eth_dev *dev;
        struct i40e_pf_vf *vf;
        struct i40e_vsi *vsi;
        struct i40e_pf *pf;
        void *temp;

        if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
                return -EINVAL;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs)
                return -EINVAL;

        vf = &pf->vfs[vf_id];
        vsi = vf->vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        ether_addr_copy(mac_addr, &vf->mac_addr);

        /* Remove all existing MAC filters */
        TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
                if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
                                != I40E_SUCCESS)
                        PMD_DRV_LOG(WARNING, "Delete MAC failed");

        return 0;
}
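
/*
 * Illustrative usage (editor's sketch): pin a fixed MAC to a VF. The
 * address below is a made-up locally administered address.
 *
 *      struct ether_addr mac = {
 *              .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *      if (rte_pmd_i40e_set_vf_mac_addr(port_id, vf_id, &mac) != 0)
 *              printf("setting VF MAC failed\n");
 */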

/* Set VLAN stripping on/off for a specific VF from the host */
int
rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;

        if (!vsi)
                return -EINVAL;

        ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
        }

        return ret;
}

int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
                                    uint16_t vlan_id)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        struct i40e_vsi *vsi;
        struct i40e_vsi_context ctxt;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        if (vlan_id > ETHER_MAX_VLAN_ID) {
                PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        hw = I40E_PF_TO_HW(pf);

        /**
         * Return -ENODEV if SR-IOV is not enabled, the VF number is not
         * configured or no queue is assigned.
         */
        if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
            pf->vf_nb_qps == 0)
                return -ENODEV;

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = vlan_id;
        if (vlan_id > 0)
                vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
        else
                vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;

        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        hw = I40E_VSI_TO_HW(vsi);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to update VSI params");
        }

        return ret;
}
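
/*
 * Illustrative usage (editor's sketch): insert VLAN 100 into all frames
 * transmitted by a VF; passing 0 disables PVID insertion again.
 *
 *      rte_pmd_i40e_set_vf_vlan_insert(port_id, vf_id, 100);
 *      rte_pmd_i40e_set_vf_vlan_insert(port_id, vf_id, 0);
 */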

int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
                                  uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_mac_filter_info filter;
        struct ether_addr broadcast = {
                .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        if (on > 1) {
                PMD_DRV_LOG(ERR, "on should be 0 or 1.");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        hw = I40E_PF_TO_HW(pf);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        /**
         * Return -ENODEV if SR-IOV is not enabled, the VF number is not
         * configured or no queue is assigned.
         */
        if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
            pf->vf_nb_qps == 0) {
                PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
                return -ENODEV;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (on) {
                rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
                filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
                ret = i40e_vsi_add_mac(vsi, &filter);
        } else {
                ret = i40e_vsi_delete_mac(vsi, &broadcast);
        }

        if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
        } else {
                ret = 0;
        }

        return ret;
}

int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        struct i40e_vsi *vsi;
        struct i40e_vsi_context ctxt;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        if (on > 1) {
                PMD_DRV_LOG(ERR, "on should be 0 or 1.");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        hw = I40E_PF_TO_HW(pf);

        /**
         * Return -ENODEV if SR-IOV is not enabled, the VF number is not
         * configured or no queue is assigned.
         */
        if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
            pf->vf_nb_qps == 0) {
                PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
                return -ENODEV;
        }

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        if (on) {
                vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
                vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
        } else {
                vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
                vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
        }

        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        hw = I40E_VSI_TO_HW(vsi);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to update VSI params");
        }

        return ret;
}

static int
i40e_vlan_filter_count(struct i40e_vsi *vsi)
{
        uint32_t j, k;
        uint16_t vlan_id;
        int count = 0;

        for (j = 0; j < I40E_VFTA_SIZE; j++) {
                if (!vsi->vfta[j])
                        continue;

                for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
                        if (!(vsi->vfta[j] & (1 << k)))
                                continue;

                        vlan_id = j * I40E_UINT32_BIT_SIZE + k;
                        if (!vlan_id)
                                continue;

                        count++;
                }
        }

        return count;
}

int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
                                    uint64_t vf_mask, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        struct i40e_vsi *vsi;
        uint16_t vf_idx;
        int ret = I40E_SUCCESS;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
                PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
                return -EINVAL;
        }

        if (vf_mask == 0) {
                PMD_DRV_LOG(ERR, "No VF.");
                return -EINVAL;
        }

        if (on > 1) {
                PMD_DRV_LOG(ERR, "on should be 0 or 1.");
                return -EINVAL;
        }

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        hw = I40E_PF_TO_HW(pf);

        /**
         * Return -ENODEV if SR-IOV is not enabled, the VF number is not
         * configured or no queue is assigned.
         */
        if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
            pf->vf_nb_qps == 0) {
                PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
                return -ENODEV;
        }

        for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
                if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
                        vsi = pf->vfs[vf_idx].vsi;
                        if (on) {
                                if (!vsi->vlan_filter_on) {
                                        vsi->vlan_filter_on = true;
                                        i40e_aq_set_vsi_vlan_promisc(hw,
                                                                     vsi->seid,
                                                                     false,
                                                                     NULL);
                                        if (!vsi->vlan_anti_spoof_on)
                                                i40e_add_rm_all_vlan_filter(
                                                        vsi, true);
                                }
                                ret = i40e_vsi_add_vlan(vsi, vlan_id);
                        } else {
                                ret = i40e_vsi_delete_vlan(vsi, vlan_id);

                                if (!i40e_vlan_filter_count(vsi)) {
                                        vsi->vlan_filter_on = false;
                                        i40e_aq_set_vsi_vlan_promisc(hw,
                                                                     vsi->seid,
                                                                     true,
                                                                     NULL);
                                }
                        }
                }
        }

        if (ret != I40E_SUCCESS) {
                ret = -ENOTSUP;
                PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
        }

        return ret;
}
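
/*
 * Illustrative usage (editor's sketch): vf_mask is a bitmap of VF ids, so
 * one call can program several VFs at once. Here VLAN 10 is allowed on
 * VF 0 and VF 3 of a hypothetical port.
 *
 *      uint64_t mask = (1ULL << 0) | (1ULL << 3);
 *
 *      if (rte_pmd_i40e_set_vf_vlan_filter(port_id, 10, mask, 1) != 0)
 *              printf("VLAN filter update failed\n");
 */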

int
rte_pmd_i40e_get_vf_stats(uint16_t port,
                          uint16_t vf_id,
                          struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        i40e_update_vsi_stats(vsi);

        stats->ipackets = vsi->eth_stats.rx_unicast +
                        vsi->eth_stats.rx_multicast +
                        vsi->eth_stats.rx_broadcast;
        stats->opackets = vsi->eth_stats.tx_unicast +
                        vsi->eth_stats.tx_multicast +
                        vsi->eth_stats.tx_broadcast;
        stats->ibytes   = vsi->eth_stats.rx_bytes;
        stats->obytes   = vsi->eth_stats.tx_bytes;
        stats->ierrors  = vsi->eth_stats.rx_discards;
        stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;

        return 0;
}
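
/*
 * Illustrative usage (editor's sketch): the counters come from the VF's
 * VSI, so the host can read them without any cooperation from the VF.
 *
 *      struct rte_eth_stats st;
 *
 *      if (rte_pmd_i40e_get_vf_stats(port_id, vf_id, &st) == 0)
 *              printf("VF rx %"PRIu64" pkts, tx %"PRIu64" pkts\n",
 *                     st.ipackets, st.opackets);
 */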

int
rte_pmd_i40e_reset_vf_stats(uint16_t port,
                            uint16_t vf_id)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        vsi->offset_loaded = false;
        i40e_update_vsi_stats(vsi);

        return 0;
}

int
rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        int ret = 0;
        int i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (bw > I40E_QOS_BW_MAX) {
                PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
                            I40E_QOS_BW_MAX);
                return -EINVAL;
        }

        if (bw % I40E_QOS_BW_GRANULARITY) {
                PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
                            I40E_QOS_BW_GRANULARITY);
                return -EINVAL;
        }

        bw /= I40E_QOS_BW_GRANULARITY;

        hw = I40E_VSI_TO_HW(vsi);

        /* No change. */
        if (bw == vsi->bw_info.bw_limit) {
                PMD_DRV_LOG(INFO,
                            "No change for VF max bandwidth. Nothing to do.");
                return 0;
        }

        /**
         * VF bandwidth limitation and TC bandwidth limitation cannot be
         * enabled in parallel; quit if TC bandwidth limitation is enabled.
         *
         * If bw is 0, the bandwidth limitation is being disabled, so there
         * is no need to check the TC bandwidth limitation.
         */
        if (bw) {
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if ((vsi->enabled_tc & BIT_ULL(i)) &&
                            vsi->bw_info.bw_ets_credits[i])
                                break;
                }
                if (i != I40E_MAX_TRAFFIC_CLASS) {
                        PMD_DRV_LOG(ERR,
                                    "TC max bandwidth has been set on this VF,"
                                    " please disable it first.");
                        return -EINVAL;
                }
        }

        ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d bandwidth, err(%d).",
                            vf_id, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        vsi->bw_info.bw_limit = (uint16_t)bw;
        vsi->bw_info.bw_max = 0;

        return 0;
}
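
/*
 * Illustrative usage (editor's sketch): cap a VF's TX rate. The value is
 * in Mbps and must be a multiple of I40E_QOS_BW_GRANULARITY; passing 0
 * removes the cap again. The 500 Mbps figure is just an example.
 *
 *      if (rte_pmd_i40e_set_vf_max_bw(port_id, vf_id, 500) != 0)
 *              printf("VF rate limit failed\n");
 */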

int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
                                uint8_t tc_num, uint8_t *bw_weight)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
        int ret = 0;
        int i, j;
        uint16_t sum;
        bool b_change = false;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
                            I40E_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        sum = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i))
                        sum++;
        }
        if (sum != tc_num) {
                PMD_DRV_LOG(ERR,
                            "Weight should be set for all %d enabled TCs.",
                            sum);
                return -EINVAL;
        }

        sum = 0;
        for (i = 0; i < tc_num; i++) {
                if (!bw_weight[i]) {
                        PMD_DRV_LOG(ERR,
                                    "Each weight should be at least 1.");
                        return -EINVAL;
                }
                sum += bw_weight[i];
        }
        if (sum != 100) {
                PMD_DRV_LOG(ERR,
                            "The sum of the TC weights should be 100.");
                return -EINVAL;
        }

        /**
         * Create the configuration for all the TCs.
         */
        memset(&tc_bw, 0, sizeof(tc_bw));
        tc_bw.tc_valid_bits = vsi->enabled_tc;
        j = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        if (bw_weight[j] !=
                                vsi->bw_info.bw_ets_share_credits[i])
                                b_change = true;

                        tc_bw.tc_bw_credits[i] = bw_weight[j];
                        j++;
                }
        }

        /* No change. */
        if (!b_change) {
                PMD_DRV_LOG(INFO,
                            "No change for TC allocated bandwidth."
                            " Nothing to do.");
                return 0;
        }

        hw = I40E_VSI_TO_HW(vsi);

        ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d TC bandwidth weight, err(%d).",
                            vf_id, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        j = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
                        j++;
                }
        }

        return 0;
}
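
/*
 * Illustrative usage (editor's sketch): the weight array must cover
 * exactly the enabled TCs and sum to 100. With two TCs enabled, a 30/70
 * split looks like this:
 *
 *      uint8_t weights[2] = {30, 70};
 *
 *      if (rte_pmd_i40e_set_vf_tc_bw_alloc(port_id, vf_id, 2, weights) != 0)
 *              printf("TC weight update failed\n");
 */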

int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
                              uint8_t tc_no, uint32_t bw)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
        int ret = 0;
        int i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (bw > I40E_QOS_BW_MAX) {
                PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
                            I40E_QOS_BW_MAX);
                return -EINVAL;
        }

        if (bw % I40E_QOS_BW_GRANULARITY) {
                PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
                            I40E_QOS_BW_GRANULARITY);
                return -EINVAL;
        }

        bw /= I40E_QOS_BW_GRANULARITY;

        if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
                            I40E_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        hw = I40E_VSI_TO_HW(vsi);

        if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
                PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
                            vf_id, tc_no);
                return -EINVAL;
        }

        /* No change. */
        if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
                PMD_DRV_LOG(INFO,
                            "No change for TC max bandwidth. Nothing to do.");
                return 0;
        }

        /**
         * VF bandwidth limitation and TC bandwidth limitation cannot be
         * enabled in parallel; disable the VF bandwidth limitation if it's
         * enabled.
         * If bw is 0, the bandwidth limitation is being disabled, so there
         * is no need to care about the VF bandwidth limitation
         * configuration.
         */
        if (bw && vsi->bw_info.bw_limit) {
                ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to disable VF(%d)"
                                    " bandwidth limitation, err(%d).",
                                    vf_id, ret);
                        return -EINVAL;
                }

                PMD_DRV_LOG(INFO,
                            "VF max bandwidth is disabled according"
                            " to TC max bandwidth setting.");
        }

        /**
         * Get all the TCs' info to create a whole picture, because
         * incremental changes aren't permitted.
         */
        memset(&tc_bw, 0, sizeof(tc_bw));
        tc_bw.tc_valid_bits = vsi->enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        tc_bw.tc_bw_credits[i] =
                                rte_cpu_to_le_16(
                                        vsi->bw_info.bw_ets_credits[i]);
                }
        }
        tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

        ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d TC %d max bandwidth, err(%d).",
                            vf_id, tc_no, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

        return 0;
}

int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_veb *veb;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_switching_comp_ets_data ets_data;
        int i;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        vsi = pf->main_vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        veb = vsi->veb;
        if (!veb) {
                PMD_DRV_LOG(ERR, "Invalid VEB.");
                return -EINVAL;
        }

        if ((tc_map & veb->enabled_tc) != tc_map) {
                PMD_DRV_LOG(ERR,
                            "TC bitmap isn't a subset of enabled TCs 0x%x.",
                            veb->enabled_tc);
                return -EINVAL;
        }

        if (tc_map == veb->strict_prio_tc) {
                PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
                return 0;
        }

        hw = I40E_VSI_TO_HW(vsi);

        /* Disable DCBx if it's the first time to set strict priority. */
        if (!veb->strict_prio_tc) {
                ret = i40e_aq_stop_lldp(hw, true, NULL);
                if (ret)
                        PMD_DRV_LOG(INFO,
                                    "Failed to disable DCBx as it's already"
                                    " disabled.");
                else
                        PMD_DRV_LOG(INFO,
                                    "DCBx is disabled according to strict"
                                    " priority setting.");
        }

        memset(&ets_data, 0, sizeof(ets_data));
        ets_data.tc_valid_bits = veb->enabled_tc;
        ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
        ets_data.tc_strict_priority_flags = tc_map;
        /* Get all TCs' bandwidth. */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (veb->enabled_tc & BIT_ULL(i)) {
                        /* For robustness, if bandwidth is 0, use 1 instead. */
                        if (veb->bw_info.bw_ets_share_credits[i])
                                ets_data.tc_bw_share_credits[i] =
                                        veb->bw_info.bw_ets_share_credits[i];
                        else
                                ets_data.tc_bw_share_credits[i] =
                                        I40E_QOS_BW_WEIGHT_MIN;
                }
        }

        if (!veb->strict_prio_tc)
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
                        NULL);
        else if (tc_map)
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
                        NULL);
        else
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
                        NULL);

        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set TCs' strict priority mode."
                            " err (%d)", ret);
                return -EINVAL;
        }

        veb->strict_prio_tc = tc_map;

        /* Enable DCBx again if strict priority is disabled for all TCs. */
        if (!tc_map) {
                ret = i40e_aq_start_lldp(hw, NULL);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to enable DCBx, err(%d).", ret);
                        return -EINVAL;
                }

                PMD_DRV_LOG(INFO,
                            "DCBx is enabled again according to strict"
                            " priority setting.");
        }

        return ret;
}
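
/*
 * Illustrative usage (editor's sketch): tc_map is a bitmap of TCs to put
 * into strict-priority mode and must be a subset of the enabled TCs.
 * Passing 0 reverts all TCs and re-enables DCBx.
 *
 *      rte_pmd_i40e_set_tc_strict_prio(port_id, 1 << 3);
 *      rte_pmd_i40e_set_tc_strict_prio(port_id, 0);
 */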

#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
#define I40E_MAX_PROFILE_NUM 16

static void
i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
                               uint32_t track_id, uint8_t *profile_info_sec,
                               bool add)
{
        struct i40e_profile_section_header *sec = NULL;
        struct i40e_profile_info *pinfo;

        sec = (struct i40e_profile_section_header *)profile_info_sec;
        sec->tbl_size = 1;
        sec->data_end = sizeof(struct i40e_profile_section_header) +
                sizeof(struct i40e_profile_info);
        sec->section.type = SECTION_TYPE_INFO;
        sec->section.offset = sizeof(struct i40e_profile_section_header);
        sec->section.size = sizeof(struct i40e_profile_info);
        pinfo = (struct i40e_profile_info *)(profile_info_sec +
                                             sec->section.offset);
        pinfo->track_id = track_id;
        memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
        memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
        if (add)
                pinfo->op = I40E_DDP_ADD_TRACKID;
        else
                pinfo->op = I40E_DDP_REMOVE_TRACKID;
}

static enum i40e_status_code
i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_profile_section_header *sec;
        uint32_t track_id;
        uint32_t offset = 0;
        uint32_t info = 0;

        sec = (struct i40e_profile_section_header *)profile_info_sec;
        track_id = ((struct i40e_profile_info *)(profile_info_sec +
                                         sec->section.offset))->track_id;

        status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
                                   track_id, &offset, &info, NULL);
        if (status)
                PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
                            "offset %d, info %d",
                            offset, info);

        return status;
}

/* Check if the profile info exists */
static int
i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port];
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t *buff;
        struct rte_pmd_i40e_profile_list *p_list;
        struct rte_pmd_i40e_profile_info *pinfo, *p;
        uint32_t i;
        int ret;
        static const uint32_t group_mask = 0x00ff0000;

        pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
                             sizeof(struct i40e_profile_section_header));
        if (pinfo->track_id == 0) {
                PMD_DRV_LOG(INFO, "Read-only profile.");
                return 0;
        }
        buff = rte_zmalloc("pinfo_list",
                           (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
                           0);
        if (!buff) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                return -1;
        }

        ret = i40e_aq_get_ddp_list(
                hw, (void *)buff,
                (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
                0, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get profile info list.");
                rte_free(buff);
                return -1;
        }
        p_list = (struct rte_pmd_i40e_profile_list *)buff;
        for (i = 0; i < p_list->p_count; i++) {
                p = &p_list->p_info[i];
                if (pinfo->track_id == p->track_id) {
                        PMD_DRV_LOG(INFO, "Profile exists.");
                        rte_free(buff);
                        return 1;
                }
        }
        for (i = 0; i < p_list->p_count; i++) {
                p = &p_list->p_info[i];
                if ((p->track_id & group_mask) == 0) {
                        PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
                        rte_free(buff);
                        return 2;
                }
        }
        for (i = 0; i < p_list->p_count; i++) {
                p = &p_list->p_info[i];
                if ((pinfo->track_id & group_mask) !=
                    (p->track_id & group_mask)) {
                        PMD_DRV_LOG(INFO, "Profile of different group exists.");
                        rte_free(buff);
                        return 3;
                }
        }

        rte_free(buff);
        return 0;
}
1554
1555 int
1556 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1557                                  uint32_t size,
1558                                  enum rte_pmd_i40e_package_op op)
1559 {
1560         struct rte_eth_dev *dev;
1561         struct i40e_hw *hw;
1562         struct i40e_package_header *pkg_hdr;
1563         struct i40e_generic_seg_header *profile_seg_hdr;
1564         struct i40e_generic_seg_header *metadata_seg_hdr;
1565         uint32_t track_id;
1566         uint8_t *profile_info_sec;
1567         int is_exist;
1568         enum i40e_status_code status = I40E_SUCCESS;
1569         static const uint32_t type_mask = 0xff000000;
1570
1571         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1572                 op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1573                 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1574                 PMD_DRV_LOG(ERR, "Operation not supported.");
1575                 return -ENOTSUP;
1576         }
1577
1578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1579
1580         dev = &rte_eth_devices[port];
1581
1582         if (!is_i40e_supported(dev))
1583                 return -ENOTSUP;
1584
1585         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1586
1587         if (size < (sizeof(struct i40e_package_header) +
1588                     sizeof(struct i40e_metadata_segment) +
1589                     sizeof(uint32_t) * 2)) {
1590                 PMD_DRV_LOG(ERR, "Buffer size is too small.");
1591                 return -EINVAL;
1592         }
1593
1594         pkg_hdr = (struct i40e_package_header *)buff;
1595
1596         if (!pkg_hdr) {
1597                 PMD_DRV_LOG(ERR, "Package buffer is invalid.");
1598                 return -EINVAL;
1599         }
1600
1601         if (pkg_hdr->segment_count < 2) {
1602                 PMD_DRV_LOG(ERR, "Segment count should be at least 2.");
1603                 return -EINVAL;
1604         }
1605
1606         i40e_update_customized_info(dev, buff, size);
1607
1608         /* Find metadata segment */
1609         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1610                                                         pkg_hdr);
1611         if (!metadata_seg_hdr) {
1612                 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1613                 return -EINVAL;
1614         }
1615         track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1616         if (track_id == I40E_DDP_TRACKID_INVALID) {
1617                 PMD_DRV_LOG(ERR, "Invalid track_id");
1618                 return -EINVAL;
1619         }
1620
1621         /* force read-only track_id for type 0 */
1622         if ((track_id & type_mask) == 0)
1623                 track_id = 0;
1624
1625         /* Find profile segment */
1626         profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1627                                                        pkg_hdr);
1628         if (!profile_seg_hdr) {
1629                 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
1630                 return -EINVAL;
1631         }
1632
1633         profile_info_sec = rte_zmalloc(
1634                 "i40e_profile_info",
1635                 sizeof(struct i40e_profile_section_header) +
1636                 sizeof(struct i40e_profile_info),
1637                 0);
1638         if (!profile_info_sec) {
1639                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1640                 return -EINVAL;
1641         }
1642
1643         /* Check if the profile is already loaded */
1644         i40e_generate_profile_info_sec(
1645                 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1646                 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1647                 track_id, profile_info_sec,
1648                 op == RTE_PMD_I40E_PKG_OP_WR_ADD);
1649         is_exist = i40e_check_profile_info(port, profile_info_sec);
1650         if (is_exist < 0) {
1651                 PMD_DRV_LOG(ERR, "Failed to check profile.");
1652                 rte_free(profile_info_sec);
1653                 return -EINVAL;
1654         }
1655
1656         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1657                 if (is_exist) {
1658                         if (is_exist == 1)
1659                                 PMD_DRV_LOG(ERR, "Profile already exists.");
1660                         else if (is_exist == 2)
1661                                 PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
1662                         else if (is_exist == 3)
1663                                 PMD_DRV_LOG(ERR, "Profile of a different group already exists.");
1664                         rte_free(profile_info_sec);
1665                         return -EEXIST;
1666                 }
1667         } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1668                 if (is_exist != 1) {
1669                         PMD_DRV_LOG(ERR, "Profile does not exist.");
1670                         rte_free(profile_info_sec);
1671                         return -EACCES;
1672                 }
1673         }
1674
1675         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1676                 status = i40e_rollback_profile(
1677                         hw,
1678                         (struct i40e_profile_segment *)profile_seg_hdr,
1679                         track_id);
1680                 if (status) {
1681                         PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
1682                         rte_free(profile_info_sec);
1683                         return status;
1684                 }
1685         } else {
1686                 status = i40e_write_profile(
1687                         hw,
1688                         (struct i40e_profile_segment *)profile_seg_hdr,
1689                         track_id);
1690                 if (status) {
1691                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1692                                 PMD_DRV_LOG(ERR, "Failed to write profile for add.");
1693                         else
1694                                 PMD_DRV_LOG(ERR, "Failed to write profile.");
1695                         rte_free(profile_info_sec);
1696                         return status;
1697                 }
1698         }
1699
1700         if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
1701                 /* Modify loaded profiles info list */
1702                 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1703                 if (status) {
1704                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1705                                 PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
1706                         else
1707                                 PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
1708                 }
1709         }
1710
1711         rte_free(profile_info_sec);
1712         return status;
1713 }
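
/*
 * Usage sketch (illustrative only, not part of the driver): an
 * application typically reads a whole DDP package file into memory and
 * passes the buffer to rte_pmd_i40e_process_ddp_package(). The helper
 * below, its name and its error handling are assumptions about the
 * caller, not driver behaviour.
 */
static __rte_unused int
i40e_example_load_ddp_profile(uint16_t port, uint8_t *pkg, uint32_t size)
{
	int ret;

	/* Try to add the profile; treat an already-loaded one as benign. */
	ret = rte_pmd_i40e_process_ddp_package(port, pkg, size,
					       RTE_PMD_I40E_PKG_OP_WR_ADD);
	if (ret == -EEXIST)
		PMD_DRV_LOG(INFO, "Profile has already been loaded.");

	return ret;
}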
1714
1715 /* Get the number of TLV records in the section */
1716 static unsigned int
1717 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1718 {
1719         unsigned int i, nb_rec, nb_tlv = 0;
1720         struct i40e_profile_tlv_section_record *tlv;
1721
1722         if (!sec)
1723                 return nb_tlv;
1724
1725         /* get number of records in the section */
1726         nb_rec = sec->section.size /
1727                                 sizeof(struct i40e_profile_tlv_section_record);
1728         for (i = 0; i < nb_rec; ) {
1729                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1730                 i += tlv->len;
1731                 nb_tlv++;
1732         }
1733         return nb_tlv;
1734 }
1735
1736 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1737         uint8_t *info_buff, uint32_t info_size,
1738         enum rte_pmd_i40e_package_info type)
1739 {
1740         uint32_t ret_size;
1741         struct i40e_package_header *pkg_hdr;
1742         struct i40e_generic_seg_header *i40e_seg_hdr;
1743         struct i40e_generic_seg_header *note_seg_hdr;
1744         struct i40e_generic_seg_header *metadata_seg_hdr;
1745
1746         if (!info_buff) {
1747                 PMD_DRV_LOG(ERR, "Output info buffer is invalid.");
1748                 return -EINVAL;
1749         }
1750
1751         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1752                 sizeof(struct i40e_metadata_segment) +
1753                 sizeof(uint32_t) * 2)) {
1754                 PMD_DRV_LOG(ERR, "Package buffer is invalid.");
1755                 return -EINVAL;
1756         }
1757
1758         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1759         if (pkg_hdr->segment_count < 2) {
1760                 PMD_DRV_LOG(ERR, "Segment count should be at least 2.");
1761                 return -EINVAL;
1762         }
1763
1764         /* Find metadata segment */
1765         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1766                 pkg_hdr);
1767
1768         /* Find global notes segment */
1769         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1770                 pkg_hdr);
1771
1772         /* Find i40e profile segment */
1773         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1774
1775         /* get global header info */
1776         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1777                 struct rte_pmd_i40e_profile_info *info =
1778                         (struct rte_pmd_i40e_profile_info *)info_buff;
1779
1780                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1781                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1782                         return -EINVAL;
1783                 }
1784
1785                 if (!metadata_seg_hdr) {
1786                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1787                         return -EINVAL;
1788                 }
1789
1790                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1791                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1792                 info->track_id =
1793                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1794
1795                 memcpy(info->name,
1796                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1797                         I40E_DDP_NAME_SIZE);
1798                 memcpy(&info->version,
1799                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1800                         sizeof(struct i40e_ddp_version));
1801                 return I40E_SUCCESS;
1802         }
1803
1804         /* get global note size */
1805         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1806                 if (info_size < sizeof(uint32_t)) {
1807                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1808                         return -EINVAL;
1809                 }
1810                 if (note_seg_hdr == NULL)
1811                         ret_size = 0;
1812                 else
1813                         ret_size = note_seg_hdr->size;
1814                 *(uint32_t *)info_buff = ret_size;
1815                 return I40E_SUCCESS;
1816         }
1817
1818         /* get global note */
1819         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1820                 if (note_seg_hdr == NULL)
1821                         return -ENOTSUP;
1822                 if (info_size < note_seg_hdr->size) {
1823                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1824                         return -EINVAL;
1825                 }
1826                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1827                 return I40E_SUCCESS;
1828         }
1829
1830         /* get i40e segment header info */
1831         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1832                 struct rte_pmd_i40e_profile_info *info =
1833                         (struct rte_pmd_i40e_profile_info *)info_buff;
1834
1835                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1836                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1837                         return -EINVAL;
1838                 }
1839
1840                 if (!metadata_seg_hdr) {
1841                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1842                         return -EINVAL;
1843                 }
1844
1845                 if (!i40e_seg_hdr) {
1846                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1847                         return -EINVAL;
1848                 }
1849
1850                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1851                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1852                 info->track_id =
1853                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1854
1855                 memcpy(info->name,
1856                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1857                         I40E_DDP_NAME_SIZE);
1858                 memcpy(&info->version,
1859                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1860                         sizeof(struct i40e_ddp_version));
1861                 return I40E_SUCCESS;
1862         }
1863
1864         /* get number of devices */
1865         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1866                 if (info_size < sizeof(uint32_t)) {
1867                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1868                         return -EINVAL;
1869                 }
1870                 *(uint32_t *)info_buff =
1871                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1872                 return I40E_SUCCESS;
1873         }
1874
1875         /* get list of devices */
1876         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1877                 uint32_t dev_num;
1878                 dev_num =
1879                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1880                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1881                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1882                         return -EINVAL;
1883                 }
1884                 memcpy(info_buff,
1885                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1886                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1887                 return I40E_SUCCESS;
1888         }
1889
1890         /* get number of protocols */
1891         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1892                 struct i40e_profile_section_header *proto;
1893
1894                 if (info_size < sizeof(uint32_t)) {
1895                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1896                         return -EINVAL;
1897                 }
1898                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1899                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1900                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1901                 return I40E_SUCCESS;
1902         }
1903
1904         /* get list of protocols */
1905         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1906                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1907                 struct rte_pmd_i40e_proto_info *pinfo;
1908                 struct i40e_profile_section_header *proto;
1909                 struct i40e_profile_tlv_section_record *tlv;
1910
1911                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1912                 nb_proto_info = info_size /
1913                                         sizeof(struct rte_pmd_i40e_proto_info);
1914                 for (i = 0; i < nb_proto_info; i++) {
1915                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1916                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1917                 }
1918                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1919                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1920                 nb_tlv = i40e_get_tlv_section_size(proto);
1921                 if (nb_tlv == 0)
1922                         return I40E_SUCCESS;
1923                 if (nb_proto_info < nb_tlv) {
1924                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1925                         return -EINVAL;
1926                 }
1927                 /* get number of records in the section */
1928                 nb_rec = proto->section.size /
1929                                 sizeof(struct i40e_profile_tlv_section_record);
1930                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1931                 for (i = j = 0; i < nb_rec; j++) {
1932                         pinfo[j].proto_id = tlv->data[0];
1933                         snprintf(pinfo[j].name, RTE_PMD_I40E_DDP_NAME_SIZE, "%s",
1934                                  (const char *)&tlv->data[1]);
1935                         i += tlv->len;
1936                         tlv = &tlv[tlv->len];
1937                 }
1938                 return I40E_SUCCESS;
1939         }
1940
1941         /* get number of packet classification types */
1942         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1943                 struct i40e_profile_section_header *pctype;
1944
1945                 if (info_size < sizeof(uint32_t)) {
1946                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1947                         return -EINVAL;
1948                 }
1949                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1950                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1951                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1952                 return I40E_SUCCESS;
1953         }
1954
1955         /* get list of packet classification types */
1956         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1957                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1958                 struct rte_pmd_i40e_ptype_info *pinfo;
1959                 struct i40e_profile_section_header *pctype;
1960                 struct i40e_profile_tlv_section_record *tlv;
1961
1962                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1963                 nb_proto_info = info_size /
1964                                         sizeof(struct rte_pmd_i40e_ptype_info);
1965                 for (i = 0; i < nb_proto_info; i++)
1966                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1967                                sizeof(struct rte_pmd_i40e_ptype_info));
1968                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1969                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1970                 nb_tlv = i40e_get_tlv_section_size(pctype);
1971                 if (nb_tlv == 0)
1972                         return I40E_SUCCESS;
1973                 if (nb_proto_info < nb_tlv) {
1974                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1975                         return -EINVAL;
1976                 }
1977
1978                 /* get number of records in the section */
1979                 nb_rec = pctype->section.size /
1980                                 sizeof(struct i40e_profile_tlv_section_record);
1981                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1982                 for (i = j = 0; i < nb_rec; j++) {
1983                         memcpy(&pinfo[j], tlv->data,
1984                                sizeof(struct rte_pmd_i40e_ptype_info));
1985                         i += tlv->len;
1986                         tlv = &tlv[tlv->len];
1987                 }
1988                 return I40E_SUCCESS;
1989         }
1990
1991         /* get number of packet types */
1992         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
1993                 struct i40e_profile_section_header *ptype;
1994
1995                 if (info_size < sizeof(uint32_t)) {
1996                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1997                         return -EINVAL;
1998                 }
1999                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2000                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2001                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2002                 return I40E_SUCCESS;
2003         }
2004
2005         /* get list of packet types */
2006         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2007                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2008                 struct rte_pmd_i40e_ptype_info *pinfo;
2009                 struct i40e_profile_section_header *ptype;
2010                 struct i40e_profile_tlv_section_record *tlv;
2011
2012                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2013                 nb_proto_info = info_size /
2014                                         sizeof(struct rte_pmd_i40e_ptype_info);
2015                 for (i = 0; i < nb_proto_info; i++)
2016                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2017                                sizeof(struct rte_pmd_i40e_ptype_info));
2018                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2019                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2020                 nb_tlv = i40e_get_tlv_section_size(ptype);
2021                 if (nb_tlv == 0)
2022                         return I40E_SUCCESS;
2023                 if (nb_proto_info < nb_tlv) {
2024                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2025                         return -EINVAL;
2026                 }
2027                 /* get number of records in the section */
2028                 nb_rec = ptype->section.size /
2029                                 sizeof(struct i40e_profile_tlv_section_record);
2030                 for (i = j = 0; i < nb_rec; j++) {
2031                         tlv = (struct i40e_profile_tlv_section_record *)
2032                                                                 &ptype[1 + i];
2033                         memcpy(&pinfo[j], tlv->data,
2034                                sizeof(struct rte_pmd_i40e_ptype_info));
2035                         i += tlv->len;
2036                 }
2037                 return I40E_SUCCESS;
2038         }
2039
2040         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2041         return -EINVAL;
2042 }
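
/*
 * Usage sketch (illustrative only): rte_pmd_i40e_get_ddp_info() is a
 * pure parser of a caller-supplied package buffer, so the global notes
 * can be retrieved in two steps, first querying their size and then
 * the notes themselves. The helper name and buffer handling below are
 * assumptions about the application.
 */
static __rte_unused int
i40e_example_read_ddp_notes(uint8_t *pkg, uint32_t pkg_size)
{
	uint32_t notes_size = 0;
	uint8_t *notes;
	int ret;

	/* Step 1: ask how large the global notes section is. */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
			(uint8_t *)&notes_size, sizeof(notes_size),
			RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE);
	if (ret || notes_size == 0)
		return ret;

	notes = rte_zmalloc("ddp_notes", notes_size, 0);
	if (!notes)
		return -ENOMEM;

	/* Step 2: fetch the notes into the correctly sized buffer. */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, notes, notes_size,
			RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES);
	rte_free(notes);
	return ret;
}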
2043
2044 int
2045 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2046 {
2047         struct rte_eth_dev *dev;
2048         struct i40e_hw *hw;
2049         enum i40e_status_code status = I40E_SUCCESS;
2050
2051         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2052
2053         dev = &rte_eth_devices[port];
2054
2055         if (!is_i40e_supported(dev))
2056                 return -ENOTSUP;
2057
2058         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2059                 return -EINVAL;
2060
2061         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2062
2063         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2064                                       size, 0, NULL);
2065
2066         return status;
2067 }
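
/*
 * Usage sketch (illustrative only): callers of
 * rte_pmd_i40e_get_ddp_list() must provide a buffer of at least
 * I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4 bytes and can then
 * walk the returned rte_pmd_i40e_profile_list. The helper below is an
 * assumption about the application.
 */
static __rte_unused int
i40e_example_count_loaded_profiles(uint16_t port)
{
	uint32_t size = I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4;
	struct rte_pmd_i40e_profile_list *p_list;
	uint8_t *buff;
	int ret;

	buff = rte_zmalloc("ddp_list", size, 0);
	if (!buff)
		return -ENOMEM;

	ret = rte_pmd_i40e_get_ddp_list(port, buff, size);
	if (!ret) {
		p_list = (struct rte_pmd_i40e_profile_list *)buff;
		ret = (int)p_list->p_count;
	}
	rte_free(buff);
	return ret;
}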
2068
2069 static int check_invalid_pkt_type(uint32_t pkt_type)
2070 {
2071         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2072
2073         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2074         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2075         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2076         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2077         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2078         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2079         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2080
2081         if (l2 &&
2082             l2 != RTE_PTYPE_L2_ETHER &&
2083             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2084             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2085             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2086             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2087             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2088             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2089             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2090                 return -1;
2091
2092         if (l3 &&
2093             l3 != RTE_PTYPE_L3_IPV4 &&
2094             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2095             l3 != RTE_PTYPE_L3_IPV6 &&
2096             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2097             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2098             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2099                 return -1;
2100
2101         if (l4 &&
2102             l4 != RTE_PTYPE_L4_TCP &&
2103             l4 != RTE_PTYPE_L4_UDP &&
2104             l4 != RTE_PTYPE_L4_FRAG &&
2105             l4 != RTE_PTYPE_L4_SCTP &&
2106             l4 != RTE_PTYPE_L4_ICMP &&
2107             l4 != RTE_PTYPE_L4_NONFRAG)
2108                 return -1;
2109
2110         if (tnl &&
2111             tnl != RTE_PTYPE_TUNNEL_IP &&
2112             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2113             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2114             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2115             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2116             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2117             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2118             tnl != RTE_PTYPE_TUNNEL_L2TP)
2119                 return -1;
2121
2122         if (il2 &&
2123             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2124             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2125             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2126                 return -1;
2127
2128         if (il3 &&
2129             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2130             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2131             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2132             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2133             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2134             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2135                 return -1;
2136
2137         if (il4 &&
2138             il4 != RTE_PTYPE_INNER_L4_TCP &&
2139             il4 != RTE_PTYPE_INNER_L4_UDP &&
2140             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2141             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2142             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2143             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2144                 return -1;
2145
2146         return 0;
2147 }
2148
2149 static int check_invalid_ptype_mapping(
2150                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2151                 uint16_t count)
2152 {
2153         int i;
2154
2155         for (i = 0; i < count; i++) {
2156                 uint16_t ptype = mapping_table[i].hw_ptype;
2157                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2158
2159                 if (ptype >= I40E_MAX_PKT_TYPE)
2160                         return -1;
2161
2162                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2163                         continue;
2164
2165                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2166                         continue;
2167
2168                 if (check_invalid_pkt_type(pkt_type))
2169                         return -1;
2170         }
2171
2172         return 0;
2173 }
2174
2175 int
2176 rte_pmd_i40e_ptype_mapping_update(
2177                         uint16_t port,
2178                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2179                         uint16_t count,
2180                         uint8_t exclusive)
2181 {
2182         struct rte_eth_dev *dev;
2183         struct i40e_adapter *ad;
2184         int i;
2185
2186         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2187
2188         dev = &rte_eth_devices[port];
2189
2190         if (!is_i40e_supported(dev))
2191                 return -ENOTSUP;
2192
2193         if (count > I40E_MAX_PKT_TYPE)
2194                 return -EINVAL;
2195
2196         if (check_invalid_ptype_mapping(mapping_items, count))
2197                 return -EINVAL;
2198
2199         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2200
2201         if (exclusive) {
2202                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2203                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2204         }
2205
2206         for (i = 0; i < count; i++)
2207                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2208                         = mapping_items[i].sw_ptype;
2209
2210         return 0;
2211 }
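
/*
 * Usage sketch (illustrative only): update a single hardware-ptype
 * entry without clearing the rest of the table. The hardware ptype
 * index and the user-defined software ptype below are arbitrary
 * placeholders, not values mandated by the driver.
 */
static __rte_unused int
i40e_example_map_one_ptype(uint16_t port)
{
	struct rte_pmd_i40e_ptype_mapping item = {
		.hw_ptype = 38, /* placeholder hardware ptype index */
		.sw_ptype = RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK | 1,
	};

	/* exclusive == 0: only this entry changes, others are kept. */
	return rte_pmd_i40e_ptype_mapping_update(port, &item, 1, 0);
}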
2212
2213 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2214 {
2215         struct rte_eth_dev *dev;
2216
2217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2218
2219         dev = &rte_eth_devices[port];
2220
2221         if (!is_i40e_supported(dev))
2222                 return -ENOTSUP;
2223
2224         i40e_set_default_ptype_table(dev);
2225
2226         return 0;
2227 }
2228
2229 int rte_pmd_i40e_ptype_mapping_get(
2230                         uint16_t port,
2231                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2232                         uint16_t size,
2233                         uint16_t *count,
2234                         uint8_t valid_only)
2235 {
2236         struct rte_eth_dev *dev;
2237         struct i40e_adapter *ad;
2238         int n = 0;
2239         uint16_t i;
2240
2241         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2242
2243         dev = &rte_eth_devices[port];
2244
2245         if (!is_i40e_supported(dev))
2246                 return -ENOTSUP;
2247
2248         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2249
2250         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2251                 if (n >= size)
2252                         break;
2253                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2254                         continue;
2255                 mapping_items[n].hw_ptype = i;
2256                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2257                 n++;
2258         }
2259
2260         *count = n;
2261         return 0;
2262 }
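
/*
 * Usage sketch (illustrative only): read back only the valid entries
 * of the ptype table. The helper name and the logging are assumptions
 * about the caller.
 */
static __rte_unused void
i40e_example_dump_ptype_mapping(uint16_t port)
{
	static struct rte_pmd_i40e_ptype_mapping items[I40E_MAX_PKT_TYPE];
	uint16_t count = 0;
	uint16_t i;

	/* valid_only == 1 skips entries still mapped to RTE_PTYPE_UNKNOWN */
	if (rte_pmd_i40e_ptype_mapping_get(port, items, I40E_MAX_PKT_TYPE,
					   &count, 1))
		return;

	for (i = 0; i < count; i++)
		PMD_DRV_LOG(INFO, "hw ptype %u -> sw ptype 0x%x",
			    items[i].hw_ptype, items[i].sw_ptype);
}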
2263
2264 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2265                                        uint32_t target,
2266                                        uint8_t mask,
2267                                        uint32_t pkt_type)
2268 {
2269         struct rte_eth_dev *dev;
2270         struct i40e_adapter *ad;
2271         uint16_t i;
2272
2273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2274
2275         dev = &rte_eth_devices[port];
2276
2277         if (!is_i40e_supported(dev))
2278                 return -ENOTSUP;
2279
2280         if (!mask && check_invalid_pkt_type(target))
2281                 return -EINVAL;
2282
2283         if (check_invalid_pkt_type(pkt_type))
2284                 return -EINVAL;
2285
2286         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2287
2288         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2289                 if (mask) {
2290                         if ((target | ad->ptype_tbl[i]) == target &&
2291                             (target & ad->ptype_tbl[i]))
2292                                 ad->ptype_tbl[i] = pkt_type;
2293                 } else {
2294                         if (ad->ptype_tbl[i] == target)
2295                                 ad->ptype_tbl[i] = pkt_type;
2296                 }
2297         }
2298
2299         return 0;
2300 }
2301
2302 int
2303 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2304                              struct ether_addr *mac_addr)
2305 {
2306         struct rte_eth_dev *dev;
2307         struct i40e_pf_vf *vf;
2308         struct i40e_vsi *vsi;
2309         struct i40e_pf *pf;
2310         struct i40e_mac_filter_info mac_filter;
2311         int ret;
2312
2313         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2314                 return -EINVAL;
2315
2316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2317
2318         dev = &rte_eth_devices[port];
2319
2320         if (!is_i40e_supported(dev))
2321                 return -ENOTSUP;
2322
2323         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2324
2325         if (vf_id >= pf->vf_num || !pf->vfs)
2326                 return -EINVAL;
2327
2328         vf = &pf->vfs[vf_id];
2329         vsi = vf->vsi;
2330         if (!vsi) {
2331                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2332                 return -EINVAL;
2333         }
2334
2335         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2336         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2337         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2338         if (ret != I40E_SUCCESS) {
2339                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2340                 return -1;
2341         }
2342
2343         return 0;
2344 }
2345
2346 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2347 {
2348         struct rte_eth_dev *dev;
2349
2350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2351
2352         dev = &rte_eth_devices[port];
2353
2354         if (!is_i40e_supported(dev))
2355                 return -ENOTSUP;
2356
2357         i40e_set_default_pctype_table(dev);
2358
2359         return 0;
2360 }
2361
2362 int rte_pmd_i40e_flow_type_mapping_get(
2363                         uint16_t port,
2364                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2365 {
2366         struct rte_eth_dev *dev;
2367         struct i40e_adapter *ad;
2368         uint16_t i;
2369
2370         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2371
2372         dev = &rte_eth_devices[port];
2373
2374         if (!is_i40e_supported(dev))
2375                 return -ENOTSUP;
2376
2377         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2378
2379         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2380                 mapping_items[i].flow_type = i;
2381                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2382         }
2383
2384         return 0;
2385 }
2386
2387 int
2388 rte_pmd_i40e_flow_type_mapping_update(
2389                         uint16_t port,
2390                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2391                         uint16_t count,
2392                         uint8_t exclusive)
2393 {
2394         struct rte_eth_dev *dev;
2395         struct i40e_adapter *ad;
2396         int i;
2397
2398         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2399
2400         dev = &rte_eth_devices[port];
2401
2402         if (!is_i40e_supported(dev))
2403                 return -ENOTSUP;
2404
2405         if (count > I40E_FLOW_TYPE_MAX)
2406                 return -EINVAL;
2407
2408         for (i = 0; i < count; i++)
2409                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2410                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2411                     (mapping_items[i].pctype &
2412                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2413                         return -EINVAL;
2414
2415         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2416
2417         if (exclusive) {
2418                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2419                         ad->pctypes_tbl[i] = 0ULL;
2420                 ad->flow_types_mask = 0ULL;
2421         }
2422
2423         for (i = 0; i < count; i++) {
2424                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2425                                                 mapping_items[i].pctype;
2426                 if (mapping_items[i].pctype)
2427                         ad->flow_types_mask |=
2428                                         (1ULL << mapping_items[i].flow_type);
2429                 else
2430                         ad->flow_types_mask &=
2431                                         ~(1ULL << mapping_items[i].flow_type);
2432         }
2433
2434         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2435                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2436
2437         return 0;
2438 }
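
/*
 * Usage sketch (illustrative only): bind one flow type to a PCTYPE bit
 * mask non-exclusively. The flow type and PCTYPE chosen below are
 * placeholders; any valid pair from rte_eth_ctrl.h and i40e_type.h
 * works the same way.
 */
static __rte_unused int
i40e_example_map_flow_type(uint16_t port)
{
	struct rte_pmd_i40e_flow_type_mapping item = {
		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
		.pctype = 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
	};

	return rte_pmd_i40e_flow_type_mapping_update(port, &item, 1, 0);
}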
2439
2440 int
2441 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2442 {
2443         struct rte_eth_dev *dev;
2444         struct ether_addr *mac;
2445         struct i40e_pf *pf;
2446         int vf_id;
2447         struct i40e_pf_vf *vf;
2448         uint16_t vf_num;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2451         dev = &rte_eth_devices[port];
2452
2453         if (!is_i40e_supported(dev))
2454                 return -ENOTSUP;
2455
2456         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2457         vf_num = pf->vf_num;
2458
2459         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2460                 vf = &pf->vfs[vf_id];
2461                 mac = &vf->mac_addr;
2462
2463                 if (is_same_ether_addr(mac, vf_mac))
2464                         return vf_id;
2465         }
2466
2467         return -EINVAL;
2468 }
2469
2470 static int
2471 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2472                               struct i40e_pf *pf)
2473 {
2474         uint16_t i;
2475         struct i40e_vsi *vsi = pf->main_vsi;
2476         uint16_t queue_offset, bsf, tc_index;
2477         struct i40e_vsi_context ctxt;
2478         struct i40e_aqc_vsi_properties_data *vsi_info;
2479         struct i40e_queue_regions *region_info =
2480                                 &pf->queue_region;
2481         int32_t ret = -EINVAL;
2482
2483         if (!region_info->queue_region_number) {
2484                 PMD_DRV_LOG(ERR, "No queue region has been configured yet");
2485                 return ret;
2486         }
2487
2488         memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2489
2490         /* Update Queue Pairs Mapping for currently enabled UPs */
2491         ctxt.seid = vsi->seid;
2492         ctxt.pf_num = hw->pf_id;
2493         ctxt.vf_num = 0;
2494         ctxt.uplink_seid = vsi->uplink_seid;
2495         ctxt.info = vsi->info;
2496         vsi_info = &ctxt.info;
2497
2498         memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2499         memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2500
2501         /* Configure queue region and queue mapping parameters,
2502          * for enabled queue region, allocate queues to this region.
2503          */
2504
2505         for (i = 0; i < region_info->queue_region_number; i++) {
2506                 tc_index = region_info->region[i].region_id;
2507                 bsf = rte_bsf32(region_info->region[i].queue_num);
2508                 queue_offset = region_info->region[i].queue_start_index;
2509                 vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2510                         (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2511                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2512         }
2513
2514         /* Associate queue number with VSI; keep vsi->nb_qps unchanged */
2515         vsi_info->mapping_flags |=
2516                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2517         vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2518         vsi_info->valid_sections |=
2519                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2520
2521         /* Update the VSI after updating the VSI queue-mapping information */
2522         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2523         if (ret) {
2524                 PMD_DRV_LOG(ERR, "Failed to configure queue region mapping, err %d",
2525                                 hw->aq.asq_last_status);
2526                 return ret;
2527         }
2528         /* update the local VSI info with updated queue map */
2529         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2530                                         sizeof(vsi->info.tc_mapping));
2531         rte_memcpy(&vsi->info.queue_mapping,
2532                         &ctxt.info.queue_mapping,
2533                         sizeof(vsi->info.queue_mapping));
2534         vsi->info.mapping_flags = ctxt.info.mapping_flags;
2535         vsi->info.valid_sections = 0;
2536
2537         return 0;
2538 }
2539
2541 static int
2542 i40e_queue_region_set_region(struct i40e_pf *pf,
2543                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2544 {
2545         uint16_t i;
2546         struct i40e_vsi *main_vsi = pf->main_vsi;
2547         struct i40e_queue_regions *info = &pf->queue_region;
2548         int32_t ret = -EINVAL;
2549
2550         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2551                                 conf_ptr->queue_num <= 64)) {
2552                 PMD_DRV_LOG(ERR, "The region size should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2553                         "total number of queues does not exceed the VSI allocation");
2554                 return ret;
2555         }
2556
2557         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2558                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2559                 return ret;
2560         }
2561
2562         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2563                                         > main_vsi->nb_used_qps) {
2564                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2565                 return ret;
2566         }
2567
2568         for (i = 0; i < info->queue_region_number; i++)
2569                 if (conf_ptr->region_id == info->region[i].region_id)
2570                         break;
2571
2572         if (i == info->queue_region_number &&
2573                                 i <= I40E_REGION_MAX_INDEX) {
2574                 info->region[i].region_id = conf_ptr->region_id;
2575                 info->region[i].queue_num = conf_ptr->queue_num;
2576                 info->region[i].queue_start_index =
2577                         conf_ptr->queue_start_index;
2578                 info->queue_region_number++;
2579         } else {
2580                 PMD_DRV_LOG(ERR, "The number of queue regions exceeds the maximum of 8, or this region id has already been set");
2581                 return ret;
2582         }
2583
2584         return 0;
2585 }
2586
2587 static int
2588 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2589                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2590 {
2591         int32_t ret = -EINVAL;
2592         struct i40e_queue_regions *info = &pf->queue_region;
2593         uint16_t i, j;
2594         uint16_t region_index, flowtype_index;
2595
2596         /* For the pctype or hardware flowtype of packet,
2597          * the specific index for each type has been defined
2598          * in file i40e_type.h as enum i40e_filter_pctype.
2599          */
2600
2601         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2602                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2603                 return ret;
2604         }
2605
2606         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2607                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2608                 return ret;
2609         }
2610
2612         for (i = 0; i < info->queue_region_number; i++)
2613                 if (rss_region_conf->region_id == info->region[i].region_id)
2614                         break;
2615
2616         if (i == info->queue_region_number) {
2617                 PMD_DRV_LOG(ERR, "This region id has not been configured yet");
2618                 ret = -EINVAL;
2619                 return ret;
2620         }
2621         region_index = i;
2622
2623         for (i = 0; i < info->queue_region_number; i++) {
2624                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2625                         if (rss_region_conf->hw_flowtype ==
2626                                 info->region[i].hw_flowtype[j]) {
2627                                 PMD_DRV_LOG(ERR, "This hw_flowtype has already been set");
2628                                 return 0;
2629                         }
2630                 }
2631         }
2632
2633         flowtype_index = info->region[region_index].flowtype_num;
2634         info->region[region_index].hw_flowtype[flowtype_index] =
2635                                         rss_region_conf->hw_flowtype;
2636         info->region[region_index].flowtype_num++;
2637
2638         return 0;
2639 }
2640
2641 static void
2642 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2643                                 struct i40e_pf *pf)
2644 {
2645         uint8_t hw_flowtype;
2646         uint32_t pfqf_hregion;
2647         uint16_t i, j, index;
2648         struct i40e_queue_regions *info = &pf->queue_region;
2649
2650         /* For the pctype or hardware flowtype of packet,
2651          * the specific index for each type has been defined
2652          * in file i40e_type.h as enum i40e_filter_pctype.
2653          */
2654
2655         for (i = 0; i < info->queue_region_number; i++) {
2656                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2657                         hw_flowtype = info->region[i].hw_flowtype[j];
2658                         index = hw_flowtype >> 3;
2659                         pfqf_hregion =
2660                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2661
2662                         if ((hw_flowtype & 0x7) == 0) {
2663                                 pfqf_hregion |= info->region[i].region_id <<
2664                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2665                                 pfqf_hregion |= 1 <<
2666                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2667                         } else if ((hw_flowtype & 0x7) == 1) {
2668                                 pfqf_hregion |= info->region[i].region_id <<
2669                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2670                                 pfqf_hregion |= 1 <<
2671                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2672                         } else if ((hw_flowtype & 0x7) == 2) {
2673                                 pfqf_hregion |= info->region[i].region_id <<
2674                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2675                                 pfqf_hregion |= 1 <<
2676                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2677                         } else if ((hw_flowtype & 0x7) == 3) {
2678                                 pfqf_hregion |= info->region[i].region_id <<
2679                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2680                                 pfqf_hregion |= 1 <<
2681                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2682                         } else if ((hw_flowtype & 0x7) == 4) {
2683                                 pfqf_hregion |= info->region[i].region_id <<
2684                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2685                                 pfqf_hregion |= 1 <<
2686                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2687                         } else if ((hw_flowtype & 0x7) == 5) {
2688                                 pfqf_hregion |= info->region[i].region_id <<
2689                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2690                                 pfqf_hregion |= 1 <<
2691                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2692                         } else if ((hw_flowtype & 0x7) == 6) {
2693                                 pfqf_hregion |= info->region[i].region_id <<
2694                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2695                                 pfqf_hregion |= 1 <<
2696                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2697                         } else {
2698                                 pfqf_hregion |= info->region[i].region_id <<
2699                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2700                                 pfqf_hregion |= 1 <<
2701                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2702                         }
2703
2704                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2705                                                 pfqf_hregion);
2706                 }
2707         }
2708 }
2709
2710 static int
2711 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2712                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2713 {
2714         struct i40e_queue_regions *info = &pf->queue_region;
2715         int32_t ret = -EINVAL;
2716         uint16_t i, j, region_index;
2717
2718         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2719                 PMD_DRV_LOG(ERR, "the user priority max index is 7");
2720                 return ret;
2721         }
2722
2723         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2724                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2725                 return ret;
2726         }
2727
2728         for (i = 0; i < info->queue_region_number; i++)
2729                 if (rss_region_conf->region_id == info->region[i].region_id)
2730                         break;
2731
2732         if (i == info->queue_region_number) {
2733                 PMD_DRV_LOG(ERR, "This region id has not been configured yet");
2734                 ret = -EINVAL;
2735                 return ret;
2736         }
2737
2738         region_index = i;
2739
2740         for (i = 0; i < info->queue_region_number; i++) {
2741                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2742                         if (info->region[i].user_priority[j] ==
2743                                 rss_region_conf->user_priority) {
2744                                 PMD_DRV_LOG(ERR, "This user priority has already been set");
2745                                 return 0;
2746                         }
2747                 }
2748         }
2749
2750         j = info->region[region_index].user_priority_num;
2751         info->region[region_index].user_priority[j] =
2752                                         rss_region_conf->user_priority;
2753         info->region[region_index].user_priority_num++;
2754
2755         return 0;
2756 }
2757
2758 static int
2759 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2760                                 struct i40e_pf *pf)
2761 {
2762         struct i40e_dcbx_config dcb_cfg_local;
2763         struct i40e_dcbx_config *dcb_cfg;
2764         struct i40e_queue_regions *info = &pf->queue_region;
2765         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2766         int32_t ret = -EINVAL;
2767         uint16_t i, j, prio_index, region_index;
2768         uint8_t tc_map, tc_bw, bw_lf;
2769
2770         if (!info->queue_region_number) {
2771                 PMD_DRV_LOG(ERR, "No queue region has been configured yet");
2772                 return ret;
2773         }
2774
2775         dcb_cfg = &dcb_cfg_local;
2776         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2777
2778         /* assume each tc has the same bw */
2779         tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2780         for (i = 0; i < info->queue_region_number; i++)
2781                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2782         /* to ensure the sum of tcbw is equal to 100 */
2783         bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
2784         for (i = 0; i < bw_lf; i++)
2785                 dcb_cfg->etscfg.tcbwtable[i]++;
2786
2787         /* assume each tc has the same Transmission Selection Algorithm */
2788         for (i = 0; i < info->queue_region_number; i++)
2789                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
2790
2791         for (i = 0; i < info->queue_region_number; i++) {
2792                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2793                         prio_index = info->region[i].user_priority[j];
2794                         region_index = info->region[i].region_id;
2795                         dcb_cfg->etscfg.prioritytable[prio_index] =
2796                                                 region_index;
2797                 }
2798         }
2799
2800         /* FW needs one App to configure HW */
2801         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2802         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2803         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2804         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
2805
2806         tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2807
2808         dcb_cfg->pfc.willing = 0;
2809         dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2810         dcb_cfg->pfc.pfcenable = tc_map;
2811
2812         /* Copy the new config to the current config */
2813         *old_cfg = *dcb_cfg;
2814         old_cfg->etsrec = old_cfg->etscfg;
2815         ret = i40e_set_dcb_config(hw);
2816
2817         if (ret) {
2818                 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2819                          i40e_stat_str(hw, ret),
2820                          i40e_aq_str(hw, hw->aq.asq_last_status));
2821                 return ret;
2822         }
2823
2824         return 0;
2825 }
2826
2827 int
2828 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2829         struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2830 {
2831         int32_t ret = -EINVAL;
2832         struct i40e_queue_regions *info = &pf->queue_region;
2833         struct i40e_vsi *main_vsi = pf->main_vsi;
2834
2835         if (on) {
2836                 i40e_queue_region_pf_flowtype_conf(hw, pf);
2837
2838                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2839                 if (ret != I40E_SUCCESS) {
2840                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2841                         return ret;
2842                 }
2843
2844                 ret = i40e_queue_region_dcb_configure(hw, pf);
2845                 if (ret != I40E_SUCCESS) {
2846                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2847                         return ret;
2848                 }
2849
2850                 return 0;
2851         }
2852
2853         if (info->queue_region_number) {
2854                 info->queue_region_number = 1;
2855                 info->region[0].queue_num = main_vsi->nb_used_qps;
2856                 info->region[0].queue_start_index = 0;
2857
2858                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2859                 if (ret != I40E_SUCCESS)
2860                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2861
2862                 ret = i40e_dcb_init_configure(dev, TRUE);
2863                 if (ret != I40E_SUCCESS) {
2864                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2865                         pf->flags &= ~I40E_FLAG_DCB;
2866                 }
2867
2868                 i40e_init_queue_region_conf(dev);
2869         }
2870         return 0;
2871 }
2872
2873 static int
2874 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2875 {
2876         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2877         uint64_t hena;
2878
2879         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2880         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2881
2882         if (!hena)
2883                 return -ENOTSUP;
2884
2885         return 0;
2886 }
2887
2888 static int
2889 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2890                 struct i40e_queue_regions *regions_ptr)
2891 {
2892         struct i40e_queue_regions *info = &pf->queue_region;
2893
2894         rte_memcpy(regions_ptr, info,
2895                         sizeof(struct i40e_queue_regions));
2896
2897         return 0;
2898 }
2899
2900 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2901                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2902 {
2903         struct rte_eth_dev *dev;
2904         struct i40e_pf *pf;
2905         struct i40e_hw *hw;
2906         int32_t ret;
2907
2908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2909         dev = &rte_eth_devices[port_id];
2910         if (!is_i40e_supported(dev))
2911                 return -ENOTSUP;
2912         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2913         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2914         if (i40e_queue_region_pf_check_rss(pf))
2915                 return -ENOTSUP;
2916         /* This queue region feature only supports PF by now. It should
2917          * be called after dev_start, and will be cleared after dev_stop.
2918          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" is just an
2919          * enable command that commits the other configurations: all
2920          * queue region configuration from the upper layer is at first
2921          * only stored in the driver software, and it is committed to
2922          * the hardware only after "FLUSH_ON". Because the PMD has to
2923          * apply the whole hardware configuration in one shot, it
2924          * records all the upper layer commands first.
2925          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" just cleans
2926          * all the queue region configuration made so far, and
2927          * restores the default configuration that the DPDK i40e
2928          * driver sets up at start up.
2929          */
2930
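	/* Illustrative usage sketch (not part of the driver): stage one
	 * region, then commit it. All field values below are made-up
	 * examples.
	 *
	 *	struct rte_pmd_i40e_queue_region_conf conf = {
	 *		.region_id = 0,
	 *		.queue_start_index = 0,
	 *		.queue_num = 4,
	 *	};
	 *
	 *	rte_pmd_i40e_rss_queue_region_conf(port_id,
	 *		RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	 *	rte_pmd_i40e_rss_queue_region_conf(port_id,
	 *		RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, NULL);
	 */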
2931         switch (op_type) {
2932         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2933                 ret = i40e_queue_region_set_region(pf,
2934                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2935                 break;
2936         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2937                 ret = i40e_queue_region_set_flowtype(pf,
2938                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2939                 break;
2940         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2941                 ret = i40e_queue_region_set_user_priority(pf,
2942                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2943                 break;
2944         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2945                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2946                 break;
2947         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2948                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2949                 break;
2950         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2951                 ret = i40e_queue_region_get_all_info(pf,
2952                                 (struct i40e_queue_regions *)arg);
2953                 break;
2954         default:
2955                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2956                             op_type);
2957                 ret = -EINVAL;
2958         }
2959
2960         I40E_WRITE_FLUSH(hw);
2961
2962         return ret;
2963 }
2964
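/* Translate the raw packet template into an FDIR filter with a raw-flow
 * input and hand it to the generic FDIR add/delete path.
 */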
2965 int rte_pmd_i40e_flow_add_del_packet_template(
2966                         uint16_t port,
2967                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2968                         uint8_t add)
{
	struct rte_eth_dev *dev;
	struct i40e_fdir_filter_conf filter_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

2975         if (!is_i40e_supported(dev))
2976                 return -ENOTSUP;
2977
2978         memset(&filter_conf, 0, sizeof(filter_conf));
2979         filter_conf.soft_id = conf->soft_id;
2980         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
2981         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
2982         filter_conf.input.flow.raw_flow.length = conf->input.length;
2983         filter_conf.input.flow_ext.pkt_template = true;
2984
2985         filter_conf.action.rx_queue = conf->action.rx_queue;
2986         filter_conf.action.behavior =
2987                 (enum i40e_fdir_behavior)conf->action.behavior;
2988         filter_conf.action.report_status =
2989                 (enum i40e_fdir_status)conf->action.report_status;
2990         filter_conf.action.flex_off = conf->action.flex_off;
2991
2992         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
2993 }
2994
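/* Illustrative usage sketch (not part of the driver): add a filter that
 * matches the given raw packet bytes. The PCTYPE, queue and soft_id values
 * are made-up examples; "pkt"/"pkt_len" stand for a caller-built template.
 * Zeroed action fields keep the default behavior and report status.
 *
 *	struct rte_pmd_i40e_pkt_template_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.soft_id = 1;
 *	conf.input.pctype = 31;
 *	conf.input.packet = pkt;
 *	conf.input.length = pkt_len;
 *	conf.action.rx_queue = 0;
 *	rte_pmd_i40e_flow_add_del_packet_template(port, &conf, 1);
 */
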
2995 int
2996 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
2997                        struct rte_pmd_i40e_inset *inset,
2998                        enum rte_pmd_i40e_inset_type inset_type)
2999 {
3000         struct rte_eth_dev *dev;
3001         struct i40e_hw *hw;
3002         uint64_t inset_reg;
3003         uint32_t mask_reg[2];
3004         int i;
3005
3006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3007
3008         dev = &rte_eth_devices[port];
3009
3010         if (!is_i40e_supported(dev))
3011                 return -ENOTSUP;
3012
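	/* The hardware supports 64 packet classifier types (PCTYPE 0-63). */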
3013         if (pctype > 63)
3014                 return -EINVAL;
3015
3016         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3017         memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
3018
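	/* For each input-set type, the 64-bit input set is read from a pair
	 * of 32-bit registers (a single register for the flexible-payload
	 * set), together with the two field-mask registers for the PCTYPE.
	 */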
3019         switch (inset_type) {
3020         case INSET_HASH:
3021                 /* Get input set */
3022                 inset_reg =
3023                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
3024                 inset_reg <<= I40E_32_BIT_WIDTH;
3025                 inset_reg |=
3026                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
3027                 /* Get field mask */
3028                 mask_reg[0] =
3029                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
3030                 mask_reg[1] =
3031                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
3032                 break;
3033         case INSET_FDIR:
3034                 inset_reg =
3035                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3036                 inset_reg <<= I40E_32_BIT_WIDTH;
3037                 inset_reg |=
3038                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3039                 mask_reg[0] =
3040                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3041                 mask_reg[1] =
3042                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3043                 break;
3044         case INSET_FDIR_FLX:
3045                 inset_reg =
3046                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3047                 mask_reg[0] =
3048                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3049                 mask_reg[1] =
3050                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3051                 break;
3052         default:
3053                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3054                 return -EINVAL;
3055         }
3056
3057         inset->inset = inset_reg;
3058
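	/* Each mask register carries a field index in bits 21:16 and a
	 * 16-bit mask in bits 15:0.
	 */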
3059         for (i = 0; i < 2; i++) {
3060                 inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3061                 inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3062         }
3063
3064         return 0;
3065 }
3066
3067 int
3068 rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
3069                        struct rte_pmd_i40e_inset *inset,
3070                        enum rte_pmd_i40e_inset_type inset_type)
3071 {
3072         struct rte_eth_dev *dev;
3073         struct i40e_hw *hw;
3074         uint64_t inset_reg;
3075         uint32_t mask_reg[2];
3076         int i;
3077
3078         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3079
3080         dev = &rte_eth_devices[port];
3081
3082         if (!is_i40e_supported(dev))
3083                 return -ENOTSUP;
3084
3085         if (pctype > 63)
3086                 return -EINVAL;
3087
3088         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3089
	/* Clear the global FDIR mask registers first so no stale fields remain */
3091         for (i = 0; i < 2; i++)
3092                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
3093
3094         inset_reg = inset->inset;
3095         for (i = 0; i < 2; i++)
3096                 mask_reg[i] = (inset->mask[i].field_idx << 16) |
3097                         inset->mask[i].mask;
3098
3099         switch (inset_type) {
3100         case INSET_HASH:
3101                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
3102                                      (uint32_t)(inset_reg & UINT32_MAX));
3103                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
3104                                      (uint32_t)((inset_reg >>
3105                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3106                 for (i = 0; i < 2; i++)
3107                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
3108                                              mask_reg[i]);
3109                 break;
3110         case INSET_FDIR:
3111                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
3112                                      (uint32_t)(inset_reg & UINT32_MAX));
3113                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
3114                                      (uint32_t)((inset_reg >>
3115                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3116                 for (i = 0; i < 2; i++)
3117                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
3118                                              mask_reg[i]);
3119                 break;
3120         case INSET_FDIR_FLX:
3121                 i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
3122                                      (uint32_t)(inset_reg & UINT32_MAX));
3123                 for (i = 0; i < 2; i++)
3124                         i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
3125                                              mask_reg[i]);
3126                 break;
3127         default:
3128                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3129                 return -EINVAL;
3130         }
3131
3132         I40E_WRITE_FLUSH(hw);
3133         return 0;
3134 }
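
/* Illustrative usage sketch (not part of the driver): read, adjust and
 * write back the FDIR input set for one PCTYPE, assuming the
 * rte_pmd_i40e_inset_field_set() helper from rte_pmd_i40e.h. The PCTYPE
 * and field index values are made-up examples.
 *
 *	struct rte_pmd_i40e_inset inset;
 *
 *	if (rte_pmd_i40e_inset_get(port, 31, &inset, INSET_FDIR) == 0) {
 *		rte_pmd_i40e_inset_field_set(&inset.inset, 13);
 *		rte_pmd_i40e_inset_set(port, 31, &inset, INSET_FDIR);
 *	}
 */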