/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "base/ixgbe_api.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

int
rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
                              struct ether_addr *mac_addr)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        int rar_entry;
        uint8_t *new_mac = (uint8_t *)(mac_addr);
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        rar_entry = hw->mac.num_rar_entries - (vf + 1);

        if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
                rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
                           ETHER_ADDR_LEN);
                return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
                                           IXGBE_RAH_AV);
        }
        return -EINVAL;
}
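
/*
 * Illustrative usage sketch, not part of the PMD itself: assign a
 * locally administered MAC to VF 0 of port 0. The port/VF numbers and
 * the address bytes are made-up example values.
 *
 *      struct ether_addr addr = {
 *              .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *      };
 *      int ret = rte_pmd_ixgbe_set_vf_mac_addr(0, 0, &addr);
 *      // 0 on success; -ENODEV, -ENOTSUP or -EINVAL on error
 *
 * Note the RAR slot math above: VF addresses are stored from the top
 * of the receive-address table down, so with e.g. 128 RAR entries,
 * vf 0 occupies entry 127.
 */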

int
rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

        ctrl = IXGBE_PF_CONTROL_MSG;
        if (vfinfo[vf].clear_to_send)
                ctrl |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, &ctrl, 1, vf);

        return 0;
}
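
/*
 * Illustrative usage sketch, not part of the PMD: ping VF 1 on port 0,
 * e.g. to nudge the VF's mailbox interrupt handler after a PF state
 * change. Port and VF numbers are made-up example values. The CTS flag
 * is only OR'ed into the message once the VF has completed its mailbox
 * handshake (clear_to_send).
 *
 *      if (rte_pmd_ixgbe_ping_vf(0, 1) != 0)
 *              // port invalid, not an ixgbe device, or VF out of range
 */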

int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;

        mac->ops.set_vlan_anti_spoofing(hw, on, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;
        mac->ops.set_mac_anti_spoofing(hw, on, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
        if (vlan_id) {
                ctrl = vlan_id;
                ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
        } else {
                ctrl = 0;
        }

        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

        return 0;
}
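
/*
 * Sketch of the VMVIR encoding above (illustrative; see the 82599
 * datasheet for the authoritative layout): a non-zero vlan_id lands in
 * the low bits of VMVIR(vf) together with IXGBE_VMVIR_VLANA_DEFAULT,
 * which tells the hardware to always insert that tag on the VF's
 * transmit path; writing 0 clears the field and disables insertion.
 *
 *      rte_pmd_ixgbe_set_vf_vlan_insert(0, 2, 100); // tag VF 2 with VLAN 100
 *      rte_pmd_ixgbe_set_vf_vlan_insert(0, 2, 0);   // stop tagging
 */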

int
rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
        /* enable or disable VMDQ loopback */
        if (on)
                ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
        else
                ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        int i;
        int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        for (i = 0; i <= num_queues; i++) {
                reg_value = IXGBE_QDE_WRITE |
                                (i << IXGBE_QDE_IDX_SHIFT) |
                                (on & IXGBE_QDE_ENABLE);
                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
        }

        return 0;
}
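
/*
 * Note on the loop above (an informal sketch, not authoritative
 * register documentation): QDE acts as an indirect write port, so a
 * single register serves every queue. Each iteration encodes the
 * target queue in the IDX field and latches the drop-enable bit with
 * IXGBE_QDE_WRITE; conceptually, for queue 5 with dropping enabled:
 *
 *      reg_value = IXGBE_QDE_WRITE | (5 << IXGBE_QDE_IDX_SHIFT) | 1;
 *      IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
 */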

int
rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        /* only VFs 0 to 63 are supported */
        if ((vf >= pci_dev->max_vfs) || (vf > 63))
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
        if (on)
                reg_value |= IXGBE_SRRCTL_DROP_EN;
        else
                reg_value &= ~IXGBE_SRRCTL_DROP_EN;

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint16_t queues_per_pool;
        uint32_t q;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

        /* The PF has 128 queue pairs and in an SRIOV configuration
         * those queues are assigned to VFs, so the RXDCTL
         * registers operate on queues owned by the VFs.
         * Say SRIOV is configured with 31 VFs: then the first 124
         * queues (0-123) are allocated to the VFs and only the
         * last 4 queues (124-127) are assigned to the PF.
         * See the illustrative sketch after this function.
         */
        if (hw->mac.type == ixgbe_mac_82598EB)
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_16_POOLS;
        else
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_64_POOLS;

        for (q = 0; q < queues_per_pool; q++)
                (*dev->dev_ops->vlan_strip_queue_set)(dev,
                                q + vf * queues_per_pool, on);
        return 0;
}
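
/*
 * Worked example of the pool math above (illustrative values): on a
 * non-82598 device with max_rx_queues == 128 and ETH_64_POOLS,
 * queues_per_pool == 128 / 64 == 2, so enabling stripping for vf == 3
 * touches queues 6 and 7:
 *
 *      for (q = 0; q < 2; q++)
 *              vlan_strip_queue_set(dev, q + 3 * 2, on);  // queues 6, 7
 */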

int
rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
                            uint16_t rx_mask, uint8_t on)
{
        int val = 0;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint32_t vmolr;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "Setting VF receive mode is only supported"
                             " on 82599 hardware and newer");
                return -ENOTSUP;
        }
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

        if (on)
                vmolr |= val;
        else
                vmolr &= ~val;

        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFRE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFRE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}
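
/*
 * Worked example of the bank selection above (illustrative): the up to
 * 64 VFs are split across two 32-bit PFVFRE registers. vf == 7 selects
 * VFRE(0) and sets bit 7; vf == 40 selects VFRE(1) and sets
 * bit (40 - 32) == 8. The same split applies to PFVFTE below.
 */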

int
rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFTE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFTE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
                                 uint64_t vf_mask, uint8_t vlan_on)
{
        struct rte_eth_dev *dev;
        int ret = 0;
        uint16_t vf_idx;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        for (vf_idx = 0; vf_idx < 64; vf_idx++) {
                if (vf_mask & (1ULL << vf_idx)) {
                        ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
                                                   vlan_on, false);
                        if (ret < 0)
                                return ret;
                }
        }

        return ret;
}
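
/*
 * Illustrative usage sketch, not part of the PMD: add VLAN 100 to the
 * filter table for VFs 0 and 2 in a single call; vf_mask is a 64-bit
 * bitmap with one bit per VF. Example values only.
 *
 *      uint64_t mask = (1ULL << 0) | (1ULL << 2);
 *      int ret = rte_pmd_ixgbe_set_vf_vlan_filter(0, 100, mask, 1);
 */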

int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
                                uint16_t tx_rate, uint64_t q_msk)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}

int
rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * The base code implements no Tx equivalent of
         * ixgbe_disable_sec_rx_path, and the base code must not be
         * modified in DPDK, so call the hand-written one directly for
         * now. The hardware support has already been checked by
         * ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Enable Ethernet CRC (required by MACsec offload) */
        ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

        /* Enable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
        ctrl |= 0x3;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

        /* Enable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
                     IXGBE_LSECTXCTRL_AUTH;
        ctrl |= IXGBE_LSECTXCTRL_AISCI;
        ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
        ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
        if (rp)
                ctrl |= IXGBE_LSECRXCTRL_RP;
        else
                ctrl &= ~IXGBE_LSECRXCTRL_RP;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * The base code implements no Tx equivalent of
         * ixgbe_enable_sec_rx_path, and the base code must not be
         * modified in DPDK, so call the hand-written one directly for
         * now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}
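
/*
 * Illustrative configuration order, a sketch only (example values, not
 * a complete MACsec setup; key material shown as a placeholder):
 *
 *      uint8_t txmac[6] = { 0x02, 0, 0, 0, 0, 0x10 };
 *      uint8_t rxmac[6] = { 0x02, 0, 0, 0, 0, 0x20 };
 *      uint8_t key[16];  // 128-bit SAK, filled in by the application
 *
 *      rte_pmd_ixgbe_macsec_enable(port, 1, 1);  // encrypt + replay protect
 *      rte_pmd_ixgbe_macsec_config_txsc(port, txmac);
 *      rte_pmd_ixgbe_macsec_config_rxsc(port, rxmac, 0);
 *      rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 0, key);
 *      rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 0, key);
 */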

int
rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * The base code implements no Tx equivalent of
         * ixgbe_disable_sec_rx_path, and the base code must not be
         * modified in DPDK, so call the hand-written one directly for
         * now. The hardware support has already been checked by
         * ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Disable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        /* Disable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECTXCTRL_DISABLE;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * The base code implements no Tx equivalent of
         * ixgbe_enable_sec_rx_path, and the base code must not be
         * modified in DPDK, so call the hand-written one directly for
         * now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}

int
rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

        ctrl = mac[4] | (mac[5] << 8);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

        return 0;
}
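
/*
 * Worked example of the byte packing above (illustrative): for MAC
 * 02:00:00:00:00:10, LSECTXSCL receives 0x00000002 (mac[0] lands in
 * the low byte) and LSECTXSCH receives 0x00001000 (mac[4] in the low
 * byte, mac[5] shifted left by 8).
 */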

int
rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

        pi = rte_cpu_to_be_16(pi);
        ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN and key */
        pn = rte_cpu_to_be_32(pn);
        if (idx == 0) {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] <<  0) |
                               (key[i * 4 + 1] <<  8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
                }
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] <<  0) |
                               (key[i * 4 + 1] <<  8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
                }
        }

        /* Set AN and select the SA */
        ctrl = (an << idx * 2) | (idx << 4);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

        return 0;
}
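
/*
 * Worked example of the key packing above (illustrative): the 16-byte
 * SAK is written as four 32-bit little-endian words, key[0] landing in
 * the low byte of the first word. For key[] = { 0x00, 0x01, ..., 0x0f }
 * the four words are 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c.
 * The Rx path below packs the key the same way.
 */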

int
rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN */
        pn = rte_cpu_to_be_32(pn);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

        /* Set the key */
        for (i = 0; i < 4; i++) {
                ctrl = (key[i * 4 + 0] <<  0) |
                       (key[i * 4 + 1] <<  8) |
                       (key[i * 4 + 2] << 16) |
                       (key[i * 4 + 3] << 24);
                IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
        }

        /* Set the AN and validate the SA */
        ctrl = an | (1 << 2);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
                              uint8_t tc_num,
                              uint8_t *bw_weight)
{
        struct rte_eth_dev *dev;
        struct ixgbe_dcb_config *dcb_config;
        struct ixgbe_dcb_tc_config *tc;
        struct rte_eth_conf *eth_conf;
        struct ixgbe_bw_conf *bw_conf;
        uint8_t i;
        uint8_t nb_tcs;
        uint16_t sum;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "Number of TCs must not exceed %d.",
                            IXGBE_DCB_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
        eth_conf = &dev->data->dev_conf;

        if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
        } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
                    ETH_32_POOLS)
                        nb_tcs = ETH_4_TCS;
                else
                        nb_tcs = ETH_8_TCS;
        } else {
                nb_tcs = 1;
        }

        if (nb_tcs != tc_num) {
                PMD_DRV_LOG(ERR,
                            "A weight must be set for each of the %d enabled TCs.",
                            nb_tcs);
                return -EINVAL;
        }

        sum = 0;
        for (i = 0; i < nb_tcs; i++)
                sum += bw_weight[i];
        if (sum != 100) {
                PMD_DRV_LOG(ERR,
                            "The sum of the TC weights must be 100.");
                return -EINVAL;
        }

        for (i = 0; i < nb_tcs; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
        }
        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
        }

        bw_conf->tc_num = nb_tcs;

        return 0;
}
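
/*
 * Illustrative usage sketch, not part of the PMD (example values
 * only): with DCB configured for 4 TCs, split Tx bandwidth 40/30/20/10
 * percent. The weights must cover every enabled TC and sum to exactly
 * 100, or the call returns -EINVAL.
 *
 *      uint8_t bw[4] = { 40, 30, 20, 10 };
 *      int ret = rte_pmd_ixgbe_set_tc_bw_alloc(0, 4, bw);
 */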

#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        ixgbe_bypass_init(dev);
        return 0;
}

int
rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_state_show(dev, state);
}

int
rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_state_store(dev, new_state);
}

int
rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
                                uint32_t event,
                                uint32_t *state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_event_show(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
                                 uint32_t event,
                                 uint32_t state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_event_store(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_timeout_store(dev, timeout);
}

int
rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_ver_show(dev, ver);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
}

int
rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_reset(dev);
}
#endif