/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>

#include "failsafe_private.h"

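/*
 * Default device information, advertised while no sub_device has been
 * probed. Numeric limits are set to their maximum so they do not rule
 * out a configuration that a later-plugged sub_device might accept;
 * the capability fields are narrowed down against the sub_devices in
 * fs_dev_infos_get().
 */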
static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /*
         * Set of capabilities that can be verified upon
         * configuring a sub-device.
         */
        .rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

/**
 * Check whether the offloading capabilities requested in the device
 * configuration are supported by a sub_device.
 *
 * @return
 *   0: all requested capabilities are supported by the sub_device
 *   positive value: the offload flag(s) found to be unsupported by the
 *   sub_device
 */
static int
fs_port_offload_validate(struct rte_eth_dev *dev,
                         struct sub_device *sdev)
{
        struct rte_eth_dev_info infos = {0};
        struct rte_eth_conf *cf;
        uint32_t cap;

        cf = &dev->data->dev_conf;
        SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
        /* RX capabilities */
        cap = infos.rx_offload_capa;
        if (cf->rxmode.hw_vlan_strip &&
            ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
                WARN("VLAN stripping offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_VLAN_STRIP;
        }
        if (cf->rxmode.hw_ip_checksum &&
            ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
                     DEV_RX_OFFLOAD_UDP_CKSUM |
                     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
             (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM))) {
                WARN("IP checksum offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_IPV4_CKSUM |
                       DEV_RX_OFFLOAD_UDP_CKSUM |
                       DEV_RX_OFFLOAD_TCP_CKSUM;
        }
        if (cf->rxmode.enable_lro &&
            ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
                WARN("TCP LRO offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_TCP_LRO;
        }
        if (cf->rxmode.hw_vlan_extend &&
            ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
                WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_QINQ_STRIP;
        }
        /* TX capabilities */
        /* Nothing to do, no tx capa supported */
        return 0;
}

/*
 * Disable the dev_conf flag corresponding to an offload capability flag
 * within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
                        uint32_t ol_cap)
{
        switch (ol_cap) {
        case DEV_RX_OFFLOAD_VLAN_STRIP:
                INFO("Disabling VLAN stripping offload");
                cf->rxmode.hw_vlan_strip = 0;
                break;
        case DEV_RX_OFFLOAD_IPV4_CKSUM:
        case DEV_RX_OFFLOAD_UDP_CKSUM:
        case DEV_RX_OFFLOAD_TCP_CKSUM:
        case (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM):
                INFO("Disabling IP checksum offload");
                cf->rxmode.hw_ip_checksum = 0;
                break;
        case DEV_RX_OFFLOAD_TCP_LRO:
                INFO("Disabling TCP LRO offload");
                cf->rxmode.enable_lro = 0;
                break;
        case DEV_RX_OFFLOAD_QINQ_STRIP:
                INFO("Disabling stacked VLAN stripping offload");
                cf->rxmode.hw_vlan_extend = 0;
                break;
        default:
                DEBUG("Unable to disable offload capability: %" PRIx32,
                      ol_cap);
                return -1;
        }
        return 0;
}

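/*
 * Configure the fail-safe port. First validate the requested offloads
 * against each probed sub_device, downgrading the fail-safe configuration
 * when an offload is unsupported (refused once several sub_devices are
 * already configured). Then configure every probed sub_device, enabling
 * RMV/LSC interrupts and registering the matching callbacks where the
 * sub_device supports them.
 */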
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int capa_flag;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Checking capabilities for sub_device %d", i);
                while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
                        /*
                         * Refuse to change configuration if multiple devices
                         * are present and we already have configured at least
                         * some of them.
                         */
                        if (PRIV(dev)->state >= DEV_ACTIVE &&
                            PRIV(dev)->subs_tail > 1) {
                                ERROR("device already configured, cannot fix live configuration");
                                return -1;
                        }
                        ret = fs_port_disable_offload(&dev->data->dev_conf,
                                                      capa_flag);
                        if (ret) {
                                ERROR("Unable to disable offload capability");
                                return ret;
                        }
                }
        }
        FOREACH_SUBDEV(sdev, i, dev) {
                int rmv_interrupt = 0;
                int lsc_interrupt = 0;
                int lsc_enabled;

                if (sdev->state != DEV_PROBED)
                        continue;

                rmv_interrupt = ETH(sdev)->data->dev_flags &
                                RTE_ETH_DEV_INTR_RMV;
                if (rmv_interrupt) {
                        DEBUG("Enabling RMV interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.rmv = 1;
                } else {
                        DEBUG("sub_device %d does not support RMV event", i);
                }
                lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
                lsc_interrupt = lsc_enabled &&
                                (ETH(sdev)->data->dev_flags &
                                 RTE_ETH_DEV_INTR_LSC);
                if (lsc_interrupt) {
                        DEBUG("Enabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 1;
                } else if (lsc_enabled && !lsc_interrupt) {
                        DEBUG("Disabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 0;
                }
                DEBUG("Configuring sub-device %d", i);
                sdev->remove = 0;
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                if (rmv_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                        RTE_ETH_EVENT_INTR_RMV,
                                        failsafe_eth_rmv_event_callback,
                                        sdev);
                        if (ret)
                                WARN("Failed to register RMV callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.rmv = 0;
                if (lsc_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                                RTE_ETH_EVENT_INTR_LSC,
                                                failsafe_eth_lsc_event_callback,
                                                dev);
                        if (ret)
                                WARN("Failed to register LSC callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}

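/*
 * Start every configured sub_device, then elect the data-path sub_device
 * through fs_switch_dev().
 */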
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev, NULL);
        return 0;
}

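/*
 * Stop every started sub_device. States are downgraded with the
 * "DEV_STARTED - 1" idiom, i.e. to the state immediately below
 * DEV_STARTED.
 */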
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

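/*
 * Close every active sub_device and release the fail-safe queues.
 * fs_dev_free_queues() is defined further below, next to the queue
 * helpers, hence the forward declaration.
 */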
static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

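/*
 * Set up an RX queue: release any previous queue at this index, allocate
 * the fail-safe queue bookkeeping (one reference counter per potential
 * sub_device), then create the matching queue on every active sub_device.
 */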
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL,
                          sizeof(*rxq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&rxq->refcnt[i]);
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

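/*
 * Set up a TX queue, mirroring fs_rx_queue_setup(): release any previous
 * queue at this index, allocate the fail-safe bookkeeping, then create
 * the queue on every active sub_device.
 */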
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue",
                          sizeof(*txq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&txq->refcnt[i]);
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

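/*
 * Promiscuous and all-multicast modes are simply broadcast to every
 * active sub_device; the underlying rte_eth_* calls return void, so
 * no error can be propagated.
 */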
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

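/*
 * Poll link status on every active sub_device, then mirror the link of
 * the preferred (TX) sub_device into the fail-safe device. A sub_device
 * returning -1 is tolerated, as some PMDs use it to report an unchanged
 * link rather than an error; the fail-safe itself returns -1 when its
 * own link state is unchanged.
 */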
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

static void
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        if (TX_SUBDEV(dev) == NULL)
                return;
        rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_stats_reset(PORT_ID(sdev));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numeric limits:
 *      Use the maximum possible value for each field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those understood by the fail-safe PMD,
 *      i.e. those it is able to verify against the device
 *      configuration (struct rte_eth_conf) when a sub-device is
 *      configured.
 *
 * At least one probed sub_device:
 *   Numeric limits:
 *      Use the values of the active probed sub_device.
 *      The rationale is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing
 *      sub_device is supposed to be plugged in later on, so the
 *      configuration process is the single point of failure and
 *      error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit computing a clean AND of all ptypes.
         * It is also incomplete by design, and we do not really need
         * the best possible value in this context.
         * We just return the ptypes of the device of highest priority,
         * usually the PREFERRED device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

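/*
 * Report the flow control parameters of the preferred (TX) sub_device.
 * Returns 0 without filling fc_conf when no sub_device has been probed
 * yet, and -ENOTSUP when the sub_device lacks the operation.
 */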
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

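/*
 * Add a MAC address on every active sub_device and record it in the
 * fail-safe pool so it can be replayed on sub_devices plugged in later.
 */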
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                /* The count must cover the newly used index. */
                PRIV(dev)->nb_mac_addr = index + 1;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

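/*
 * Generic filter control. A GENERIC/GET request returns the fail-safe
 * rte_flow ops; any other request is forwarded to every active
 * sub_device.
 */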
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type type,
                enum rte_filter_op op,
                void *arg)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        if (type == RTE_ETH_FILTER_GENERIC &&
            op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &fs_flow_ops;
                return 0;
        }
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
                ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
                if (ret) {
                        ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
        .filter_ctrl = fs_filter_ctrl,
};
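
/*
 * Usage sketch (illustrative only; device names below are hypothetical):
 * the fail-safe PMD is instantiated through an EAL --vdev argument that
 * declares its sub_devices, e.g.:
 *
 *   testpmd --vdev 'net_failsafe0,dev(net_ring0),dev(net_tap0,iface=tap0)'
 *
 * Each dev() slot becomes a sub_device driven by the callbacks above,
 * which replay configuration, queues and MAC addresses when a slot is
 * plugged in.
 */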