/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
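
/*
 * Each op below applies the requested ethdev operation to the relevant
 * sub-devices, usually every sub-device in DEV_ACTIVE state iterated with
 * FOREACH_SUBDEV_STATE(), so the fail-safe port and its sub-ports stay
 * consistent and any sub-device can take over after a removal.
 */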
static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /*
         * Set of capabilities that can be verified upon
         * configuring a sub-device.
         */
        .rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};
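
/*
 * Note: these defaults are only reported while no sub-device has been
 * probed; see the dev_infos_get rules documented above fs_dev_infos_get()
 * for how they are combined with sub-device capabilities afterwards.
 */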
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                int rmv_interrupt = 0;
                int lsc_interrupt = 0;
                int lsc_enabled;

                if (sdev->state != DEV_PROBED)
                        continue;

                /* RMV (device removal) interrupts drive hotplug handling. */
                rmv_interrupt = ETH(sdev)->data->dev_flags &
                                RTE_ETH_DEV_INTR_RMV;
                if (rmv_interrupt) {
                        DEBUG("Enabling RMV interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.rmv = 1;
                } else {
                        DEBUG("sub_device %d does not support RMV event", i);
                }
                /* LSC is only requested if the application asked for it. */
                lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
                lsc_interrupt = lsc_enabled &&
                                (ETH(sdev)->data->dev_flags &
                                 RTE_ETH_DEV_INTR_LSC);
                if (lsc_interrupt) {
                        DEBUG("Enabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 1;
                } else if (lsc_enabled && !lsc_interrupt) {
                        DEBUG("Disabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 0;
                }
                DEBUG("Configuring sub-device %d", i);
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                if (rmv_interrupt && sdev->rmv_callback == 0) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                        RTE_ETH_EVENT_INTR_RMV,
                                        failsafe_eth_rmv_event_callback,
                                        sdev);
                        if (ret)
                                WARN("Failed to register RMV callback for sub_device %d",
                                     SUB_ID(sdev));
                        else
                                sdev->rmv_callback = 1;
                }
                dev->data->dev_conf.intr_conf.rmv = 0;
                if (lsc_interrupt && sdev->lsc_callback == 0) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                                RTE_ETH_EVENT_INTR_LSC,
                                                failsafe_eth_lsc_event_callback,
                                                dev);
                        if (ret)
                                WARN("Failed to register LSC callback for sub_device %d",
                                     SUB_ID(sdev));
                        else
                                sdev->lsc_callback = 1;
                }
                dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}
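
/*
 * Sub-device state ladder used throughout this file:
 * DEV_PROBED (exists, configurable) -> DEV_ACTIVE (configured)
 * -> DEV_STARTED (started). Stop and close walk it back one step at a
 * time, e.g. DEV_STARTED - 1 is DEV_ACTIVE.
 */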
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev, NULL);
        return 0;
}
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}
static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                failsafe_eth_dev_unregister_callbacks(sdev);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}
static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                if (ETH(sdev)->data->rx_queues != NULL &&
                    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
                        SUBOPS(sdev, rx_queue_release)
                                (ETH(sdev)->data->rx_queues[rxq->qid]);
                }
        }
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        if (rx_conf->rx_deferred_start) {
                ERROR("Rx queue deferred start is not supported");
                return -EINVAL;
        }
        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL,
                          sizeof(*rxq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&rxq->refcnt[i]);
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}
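
/*
 * Note: the queue structures above are allocated with a flexible tail
 * array holding one rte_atomic64_t per possible sub-device slot
 * (PRIV(dev)->subs_tail), hence the sizeof(*rxq) plus array size in the
 * rte_zmalloc() call; each counter is then initialized per sub-device.
 */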
static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                if (ETH(sdev)->data->tx_queues != NULL &&
                    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
                        SUBOPS(sdev, tx_queue_release)
                                (ETH(sdev)->data->tx_queues[txq->qid]);
                }
        }
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        if (tx_conf->tx_deferred_start) {
                ERROR("Tx queue deferred start is not supported");
                return -EINVAL;
        }
        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue",
                          sizeof(*txq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&txq->refcnt[i]);
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}
static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}
static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}
static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                /* Report the TX sub-device's link as the fail-safe's own. */
                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}
static int
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
                uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

                ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
                if (ret) {
                        ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
                              i, ret);
                        *timestamp = 0;
                        return ret;
                }
                *timestamp = rte_rdtsc();
                failsafe_stats_increment(stats, snapshot);
        }
        return 0;
}
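
/*
 * Stats are accumulated in PRIV(dev)->stats_accumulator so counters
 * survive sub-device removals; the per-sub-device snapshot and its
 * rte_rdtsc() timestamp record when that sub-device's counters were
 * last successfully read.
 */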
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                rte_eth_stats_reset(PORT_ID(sdev));
                memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
        }
        memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                 struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit a clean AND of all ptypes.
         * It is also incomplete by design and we do not really care
         * to have a best possible value in this context.
         * We just return the ptypes of the device of highest
         * priority, usually the PREFERRED device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}
static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}
static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                PRIV(dev)->nb_mac_addr = index;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}
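
/*
 * The vmdq value is remembered per MAC index in mac_addr_pool[] so the
 * fail-safe can re-apply its MAC addresses to a sub-device that is
 * plugged back in later (handled by the hotplug sync code outside this
 * file).
 */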
static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type type,
                enum rte_filter_op op,
                void *arg)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        if (type == RTE_ETH_FILTER_GENERIC &&
            op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &fs_flow_ops;
                return 0;
        }
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
                ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
                if (ret) {
                        ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}
const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
        .filter_ctrl = fs_filter_ctrl,
};