drivers/net/sfc/sfc_rx.c
/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. Together with SFC_RX_QFLUSH_POLL_WAIT_MS it
 * defines the Rx queue flush attempt timeout.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

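/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK
 * mbufs. Refill is skipped until at least refill_threshold descriptors
 * are free; buffers are posted via efx_rx_qpost() and made visible to
 * the NIC with a single efx_rx_qpush() doorbell at the end.
 */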
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint16_t port_id = rxq->dp.dpq.port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        id = added & rxq->ptr_mask;
        do {
                if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                                  RTE_DIM(objs)) < 0)) {
                        /*
                         * Strictly speaking, incrementing the counter from
                         * different contexts is not safe, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push what has been posted */
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        SFC_ASSERT(m->next == NULL);
                        SFC_ASSERT(m->nb_segs == 1);
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_mtophys(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        } while (--bulks > 0);

        SFC_ASSERT(added != rxq->added);
        rxq->added = added;
        efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

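/*
 * Translate checksum validation results from the Rx descriptor flags
 * into mbuf ol_flags: good/bad IPv4 and TCP/UDP checksums are mapped,
 * anything else is left as checksum-unknown.
 */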
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);
                break;
        }

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);
                break;
        }

        return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(void)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

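/*
 * If the RSS hash flag is enabled for the queue, read the Toeplitz hash
 * of IPv4/IPv6 packets from the pseudo-header Rx prefix and store it in
 * the mbuf.
 */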
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
                        struct rte_mbuf *m)
{
#if EFSYS_OPT_RX_SCALE
        uint8_t *mbuf_data;

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
                return;

        mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

        if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
                m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                                                      mbuf_data);

                m->ol_flags |= PKT_RX_RSS_HASH;
        }
#endif
}

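/*
 * Burst receive: poll the event queue to update the pending pointer,
 * then complete descriptors up to nb_pkts. Scattered packets are
 * chained fragment by fragment via rte_pktmbuf_chain() and delivered
 * only when the last fragment (no EFX_PKT_CONT) is seen; a descriptor
 * marked for discard drops the whole packet chain.
 */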
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_dp_rxq *dp_rxq = rx_queue;
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;
        struct rte_mbuf *scatter_pkt = NULL;

        if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_efx_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (discard_next)
                        goto discard;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                if (scatter_pkt != NULL) {
                        if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
                                rte_pktmbuf_free(scatter_pkt);
                                goto discard;
                        }
                        /* The packet to deliver */
                        m = scatter_pkt;
                }

                if (desc_flags & EFX_PKT_CONT) {
                        /* The packet is scattered, more fragments to come */
                        scatter_pkt = m;
                        /* Further fragments have no prefix */
                        prefix_size = 0;
                        continue;
                }

                /* Scattered packet is done */
                scatter_pkt = NULL;
                /* The first fragment of the packet has the prefix */
                prefix_size = rxq->prefix_size;

                m->ol_flags =
                        sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
                m->packet_type =
                        sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

                /*
                 * Extract the RSS hash from the packet prefix and
                 * set the corresponding mbuf field (if needed and possible)
                 */
                sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

                m->data_off += prefix_size;

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        /* pending is only moved when an entire packet is received */
        SFC_ASSERT(scatter_pkt == NULL);

        rxq->completed = completed;

        sfc_efx_rx_qrefill(rxq);

        return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
                return 0;

        sfc_ev_qpoll(rxq->evq);

        return rxq->pending - rxq->completed;
}

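/*
 * Map a generic datapath Rx queue back to the control path Rx queue
 * using the port and queue IDs stored in the datapath queue.
 */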
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
        const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
        struct rte_eth_dev *eth_dev;
        struct sfc_adapter *sa;
        struct sfc_rxq *rxq;

        SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
        eth_dev = &rte_eth_devices[dpq->port_id];

        sa = eth_dev->data->dev_private;

        SFC_ASSERT(dpq->queue_id < sa->rxq_count);
        rxq = sa->rxq_info[dpq->queue_id].rxq;

        SFC_ASSERT(rxq != NULL);
        return rxq;
}

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                   const struct rte_pci_addr *pci_addr, int socket_id,
                   const struct sfc_dp_rx_qcreate_info *info,
                   struct sfc_dp_rxq **dp_rxqp)
{
        struct sfc_efx_rxq *rxq;
        int rc;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        /* efx datapath is bound to efx control path */
        rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
        if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
                rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->batch_max = info->batch_max;
        rxq->prefix_size = info->prefix_size;
        rxq->refill_threshold = info->refill_threshold;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_desc);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
                  __rte_unused unsigned int evq_read_ptr)
{
        /* The libefx-based datapath is specific to the libefx-based PMD */
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

        rxq->common = crxq->common;

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        sfc_efx_rx_qrefill(rxq);

        rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
                 __rte_unused unsigned int *evq_read_ptr)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

        /* The libefx-based datapath is bound to the libefx-based PMD and
         * uses the event queue structure directly, so there is no need
         * to return the EvQ read pointer.
         */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
                /* Packed stream relies on 0 in an inactive SW desc.
                 * Rx queue stop is not performance critical, so
                 * there is no harm in always doing it.
                 */
                rxd->flags = 0;
                rxd->size = 0;
        }

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

struct sfc_dp_rx sfc_efx_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EFX,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = 0,
        },
        .features               = SFC_DP_RX_FEAT_SCATTER,
        .qcreate                = sfc_efx_rx_qcreate,
        .qdestroy               = sfc_efx_rx_qdestroy,
        .qstart                 = sfc_efx_rx_qstart,
        .qstop                  = sfc_efx_rx_qstop,
        .qpurge                 = sfc_efx_rx_qpurge,
        .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
        .qdesc_npending         = sfc_efx_rx_qdesc_npending,
        .pkt_burst              = sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq = sa->rxq_info[sw_index].rxq;

        if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
                return 0;

        return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
        struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

        return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}

static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;
        int rc;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing in the case of a flush failure or
         * timeout. In the worst case it can delay for 6 seconds
         * (SFC_RX_QFLUSH_ATTEMPTS retries of up to
         * SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS each).
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                rc = efx_rx_qflush(rxq->common);
                if (rc != 0) {
                        rxq->state |= (rc == EALREADY) ?
                                SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for the Rx queue flush done or failed event for at
                 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);
        }

        sa->dp_rx->qpurge(rxq->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
        boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
        struct sfc_port *port = &sa->port;
        int rc;

        /*
         * If promiscuous or all-multicast mode has been requested, setting
         * the filter for the default Rx queue might fail, in particular when
         * running over a PCI function which is not a member of the
         * corresponding privilege groups; if this occurs, a few iterations
         * will be made to repeat this step without the promiscuous and
         * all-multicast flags set
         */
retry:
        rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
        if (rc == 0)
                return 0;
        else if (rc != EOPNOTSUPP)
                return rc;

        if (port->promisc) {
                sfc_warn(sa, "promiscuous mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "promiscuous mode will be disabled");

                port->promisc = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        if (port->allmulti) {
                sfc_warn(sa, "all-multicast mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "all-multicast mode will be disabled");

                port->allmulti = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_port *port = &sa->port;
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
        if (rc != 0)
                goto fail_ev_qstart;

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                            &rxq->common);
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
        if (rc != 0)
                goto fail_dp_qstart;

        rxq->state |= SFC_RXQ_STARTED;

        if ((sw_index == 0) && !port->isolated) {
                rc = sfc_rx_default_rxq_set_filter(sa, rxq);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
        sfc_ev_qstop(evq);

fail_ev_qstart:
        return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;

        if (rxq->state == SFC_RXQ_INITIALIZED)
                return;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(rxq->evq);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
                   const struct rte_eth_rxconf *rx_conf)
{
        const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
                sfc_err(sa,
                        "RxQ free threshold too large: %u vs maximum %u",
                        rx_conf->rx_free_thresh, rx_free_thresh_max);
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        return rc;
}

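/*
 * Derive the data start alignment that the mempool guarantees: the mbuf
 * object is cache-line aligned, so the guaranteed alignment is limited
 * both by the cache line size and by the alignment of the data offset
 * (mbuf header plus private area plus headroom) from the object start.
 */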
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
}

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * The mbuf is always cache line aligned; double-check
         * that it meets the Rx buffer start alignment requirements.
         */

        /* Start from the mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate the space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is used below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same or better than end,
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;
        struct sfc_dp_rx_qcreate_info info;

        rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
        rxq_info->type =
                sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
                EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
                          rxq_info->entries, socket_id, &evq);
        if (rc != 0)
                goto fail_ev_qinit;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rxq_info->rxq = rxq;

        rxq->evq = evq;
        rxq->hw_index = sw_index;
        rxq->refill_threshold =
                RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
        rxq->refill_mb_pool = mb_pool;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        memset(&info, 0, sizeof(info));
        info.refill_mb_pool = rxq->refill_mb_pool;
        info.refill_threshold = rxq->refill_threshold;
        info.buf_size = buf_size;
        info.batch_max = encp->enc_rx_batch_max;
        info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
        if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
                info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif

        info.rxq_entries = rxq_info->entries;
        info.rxq_hw_ring = rxq->mem.esm_base;
        info.evq_entries = rxq_info->entries;
        info.evq_hw_ring = evq->mem.esm_base;
        info.hw_index = rxq->hw_index;
        info.mem_bar = sa->mem_bar.esb_base;

        rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
                                &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
                                socket_id, &info, &rxq->dp);
        if (rc != 0)
                goto fail_dp_rx_qcreate;

        evq->dp_rxq = rxq->dp;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

        return 0;

fail_dp_rx_qcreate:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rxq_info->rxq = NULL;
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(evq);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        sa->dp_rx->qdestroy(rxq->dp);
        rxq->dp = NULL;

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        sfc_dma_free(sa, &rxq->mem);

        sfc_ev_qfini(rxq->evq);
        rxq->evq = NULL;

        rte_free(rxq);
}

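/*
 * Conversion helpers between the DPDK ETH_RSS_* hash field flags and the
 * libefx EFX_RX_HASH_* hash types. The mapping is coarse: all IPv4
 * (respectively IPv6) non-TCP variants map to the same libefx hash type.
 */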
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
        efx_rx_hash_type_t efx_hash_types = 0;

        if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV4;

        if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV4;

        if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV6;

        if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV6;

        return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
        uint64_t rss_hf = 0;

        if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
                rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                           ETH_RSS_NONFRAG_IPV4_OTHER);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
                rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                           ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
                rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

        return rss_hf;
}
#endif

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
        int rc = 0;

#if EFSYS_OPT_RX_SCALE
        if (sa->rss_channels > 0) {
                rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
                                           sa->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
                                          sizeof(sa->rss_tbl));
        }

finish:
#endif
        return rc;
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        rc = sfc_rx_rss_config(sa);
        if (rc != 0)
                goto fail_rss_config;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                if (!sa->rxq_info[sw_index].deferred_start ||
                    sa->rxq_info[sw_index].deferred_started) {
                        rc = sfc_rx_qstart(sa, sw_index);
                        if (rc != 0)
                                goto fail_rx_qstart;
                }
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

fail_rss_config:
        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
#if EFSYS_OPT_RX_SCALE
        case ETH_MQ_RX_RSS:
                if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                        rc = EINVAL;
                }
                break;
#endif
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (rxmode->hw_vlan_extend) {
                sfc_err(sa,
                        "Q-in-Q HW VLAN stripping not supported");
                rc = EINVAL;
        }

        if (!rxmode->hw_strip_crc) {
                sfc_warn(sa,
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;
        }

        if (rxmode->enable_scatter &&
            (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
                sfc_err(sa, "Rx scatter not supported by %s datapath",
                        sa->dp_rx->dp.name);
                rc = EINVAL;
        }

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");
                rc = EINVAL;
        }

        return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
        int sw_index;

        SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

        sw_index = sa->rxq_count;
        while (--sw_index >= (int)nb_rx_queues) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        sa->rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when the number of receive queues
 * is specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
                     nb_rx_queues, sa->rxq_count);

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        if (nb_rx_queues == sa->rxq_count)
                goto done;

        if (sa->rxq_info == NULL) {
                rc = ENOMEM;
                sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
                                                 sizeof(sa->rxq_info[0]), 0,
                                                 sa->socket_id);
                if (sa->rxq_info == NULL)
                        goto fail_rxqs_alloc;
        } else {
                struct sfc_rxq_info *new_rxq_info;

                if (nb_rx_queues < sa->rxq_count)
                        sfc_rx_fini_queues(sa, nb_rx_queues);

                rc = ENOMEM;
                new_rxq_info =
                        rte_realloc(sa->rxq_info,
                                    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
                if (new_rxq_info == NULL && nb_rx_queues > 0)
                        goto fail_rxqs_realloc;

                sa->rxq_info = new_rxq_info;
                if (nb_rx_queues > sa->rxq_count)
                        memset(&sa->rxq_info[sa->rxq_count], 0,
                               (nb_rx_queues - sa->rxq_count) *
                               sizeof(sa->rxq_info[0]));
        }

        while (sa->rxq_count < nb_rx_queues) {
                rc = sfc_rx_qinit_info(sa, sa->rxq_count);
                if (rc != 0)
                        goto fail_rx_qinit_info;

                sa->rxq_count++;
        }

#if EFSYS_OPT_RX_SCALE
        sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
                           MIN(sa->rxq_count, EFX_MAXRSS) : 0;

        if (sa->rss_channels > 0) {
                for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                        sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
        }
#endif

done:
        return 0;

fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
        sfc_rx_close(sa);

fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
        sfc_rx_fini_queues(sa, 0);

        sa->rss_channels = 0;

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
}