deb_dpdk.git: drivers/net/mpipe/mpipe_tilegx.c (Imported Upstream version 16.11)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of EZchip Semiconductor nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <unistd.h>
34
35 #include <rte_eal.h>
36 #include <rte_vdev.h>
37 #include <rte_eal_memconfig.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cycles.h>
41
42 #include <arch/mpipe_xaui_def.h>
43 #include <arch/mpipe_gbe_def.h>
44
45 #include <gxio/mpipe.h>
46
47 #ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
48 #define PMD_DEBUG_RX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
49 #define PMD_DEBUG_TX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
50 #else
51 #define PMD_DEBUG_RX(...)
52 #define PMD_DEBUG_TX(...)
53 #endif
54
55 #define MPIPE_MAX_CHANNELS              128
56 #define MPIPE_TX_MAX_QUEUES             128
57 #define MPIPE_RX_MAX_QUEUES             16
58 #define MPIPE_TX_DESCS                  512
59 #define MPIPE_RX_BUCKETS                256
60 #define MPIPE_RX_STACK_SIZE             65536
61 #define MPIPE_RX_IP_ALIGN               2
62 #define MPIPE_BSM_ALIGN                 128
63
64 #define MPIPE_LINK_UPDATE_TIMEOUT       10      /*  s */
65 #define MPIPE_LINK_UPDATE_INTERVAL      100000  /* us */
66
67 struct mpipe_channel_config {
68         int enable;
69         int first_bucket;
70         int num_buckets;
71         int head_room;
72         gxio_mpipe_rules_stacks_t stacks;
73 };
74
75 struct mpipe_context {
76         rte_spinlock_t        lock;
77         gxio_mpipe_context_t  context;
78         struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
79 };
80
81 /* Per-core local data. */
82 struct mpipe_local {
83         int mbuf_push_debt[RTE_MAX_ETHPORTS];   /* Buffer push debt. */
84 } __rte_cache_aligned;
85
86 #define MPIPE_BUF_DEBT_THRESHOLD        32
87 static __thread struct mpipe_local mpipe_local;
88 static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
89 static int mpipe_instances;
90 static const char *drivername = "MPIPE PMD";
91
92 /* Per queue statistics. */
93 struct mpipe_queue_stats {
94         uint64_t packets, bytes, errors, nomem;
95 };
96
97 /* Common tx/rx queue fields. */
98 struct mpipe_queue {
99         struct mpipe_dev_priv *priv;    /* "priv" data of its device. */
100         uint16_t nb_desc;               /* Number of descriptors. */
101         uint16_t port_id;               /* Device index. */
102         uint16_t stat_idx;              /* Queue stats index. */
103         uint8_t queue_idx;              /* Queue index. */
104         uint8_t link_status;            /* 0 = link down. */
105         struct mpipe_queue_stats stats; /* Stat data for the queue. */
106 };
107
108 /* Transmit queue description. */
109 struct mpipe_tx_queue {
110         struct mpipe_queue q;           /* Common stuff. */
111 };
112
113 /* Receive queue description. */
114 struct mpipe_rx_queue {
115         struct mpipe_queue q;           /* Common stuff. */
116         gxio_mpipe_iqueue_t iqueue;     /* mPIPE iqueue. */
117         gxio_mpipe_idesc_t *next_desc;  /* Next idesc to process. */
118         int avail_descs;                /* Number of available descs. */
119         void *rx_ring_mem;              /* DMA ring memory. */
120 };
121
122 struct mpipe_dev_priv {
123         gxio_mpipe_context_t *context;  /* mPIPE context. */
124         gxio_mpipe_link_t link;         /* mPIPE link for the device. */
125         gxio_mpipe_equeue_t equeue;     /* mPIPE equeue. */
126         unsigned equeue_size;           /* mPIPE equeue desc count. */
127         int instance;                   /* mPIPE instance. */
128         int ering;                      /* mPIPE eDMA ring. */
129         int stack;                      /* mPIPE buffer stack. */
130         int channel;                    /* Device channel. */
131         int port_id;                    /* DPDK port index. */
132         struct rte_eth_dev *eth_dev;    /* DPDK device. */
133         struct rte_mbuf **tx_comps;     /* TX completion array. */
134         struct rte_mempool *rx_mpool;   /* mpool used by the rx queues. */
135         unsigned rx_offset;             /* Receive head room. */
136         unsigned rx_size_code;          /* mPIPE rx buffer size code. */
137         int is_xaui:1,                  /* Is this an xgbe or gbe? */
138             initialized:1,              /* Initialized port? */
139             running:1;                  /* Running port? */
140         struct ether_addr mac_addr;     /* MAC address. */
141         unsigned nb_rx_queues;          /* Configured rx queues. */
142         unsigned nb_tx_queues;          /* Configured tx queues. */
143         int first_bucket;               /* mPIPE bucket start index. */
144         int first_ring;                 /* mPIPE notif ring start index. */
145         int notif_group;                /* mPIPE notif group. */
146         rte_atomic32_t dp_count __rte_cache_aligned;    /* DP Entry count. */
147         int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
148         int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
149 };
150
151 #define mpipe_priv(dev)                 \
152         ((struct mpipe_dev_priv*)(dev)->data->dev_private)
153
154 #define mpipe_name(priv)                \
155         ((priv)->eth_dev->data->name)
156
157 #define mpipe_rx_queue(priv, n)         \
158         ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])
159
160 #define mpipe_tx_queue(priv, n)         \
161         ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])
162
163 static void
164 mpipe_xmit_flush(struct mpipe_dev_priv *priv);
165
166 static void
167 mpipe_recv_flush(struct mpipe_dev_priv *priv);
168
169 static int mpipe_equeue_sizes[] = {
170         [GXIO_MPIPE_EQUEUE_ENTRY_512]   = 512,
171         [GXIO_MPIPE_EQUEUE_ENTRY_2K]    = 2048,
172         [GXIO_MPIPE_EQUEUE_ENTRY_8K]    = 8192,
173         [GXIO_MPIPE_EQUEUE_ENTRY_64K]   = 65536,
174 };
175
176 static int mpipe_iqueue_sizes[] = {
177         [GXIO_MPIPE_IQUEUE_ENTRY_128]   = 128,
178         [GXIO_MPIPE_IQUEUE_ENTRY_512]   = 512,
179         [GXIO_MPIPE_IQUEUE_ENTRY_2K]    = 2048,
180         [GXIO_MPIPE_IQUEUE_ENTRY_64K]   = 65536,
181 };
182
183 static int mpipe_buffer_sizes[] = {
184         [GXIO_MPIPE_BUFFER_SIZE_128]    = 128,
185         [GXIO_MPIPE_BUFFER_SIZE_256]    = 256,
186         [GXIO_MPIPE_BUFFER_SIZE_512]    = 512,
187         [GXIO_MPIPE_BUFFER_SIZE_1024]   = 1024,
188         [GXIO_MPIPE_BUFFER_SIZE_1664]   = 1664,
189         [GXIO_MPIPE_BUFFER_SIZE_4096]   = 4096,
190         [GXIO_MPIPE_BUFFER_SIZE_10368]  = 10368,
191         [GXIO_MPIPE_BUFFER_SIZE_16384]  = 16384,
192 };
193
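/* Look up the gxio context for an mPIPE instance, or NULL if the
 * instance number is out of range. */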
194 static gxio_mpipe_context_t *
195 mpipe_context(int instance)
196 {
197         if (instance < 0 || instance >= mpipe_instances)
198                 return NULL;
199         return &mpipe_contexts[instance].context;
200 }
201
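/* Rebuild the classifier rule set for an mPIPE instance: one rule per
 * enabled channel, with the supplied config substituted for the given
 * channel.  On a successful commit the new config is cached; the whole
 * update runs under the per-context lock. */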
202 static int mpipe_channel_config(int instance, int channel,
203                                 struct mpipe_channel_config *config)
204 {
205         struct mpipe_channel_config *data;
206         struct mpipe_context *context;
207         gxio_mpipe_rules_t rules;
208         int idx, rc = 0;
209
210         if (instance < 0 || instance >= mpipe_instances ||
211             channel < 0 || channel >= MPIPE_MAX_CHANNELS)
212                 return -EINVAL;
213
214         context = &mpipe_contexts[instance];
215
216         rte_spinlock_lock(&context->lock);
217
218         gxio_mpipe_rules_init(&rules, &context->context);
219
220         for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
221                 data = (channel == idx) ? config : &context->channels[idx];
222
223                 if (!data->enable)
224                         continue;
225
226                 rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
227                                             data->num_buckets, &data->stacks);
228                 if (rc < 0) {
229                         goto done;
230                 }
231
232                 rc = gxio_mpipe_rules_add_channel(&rules, idx);
233                 if (rc < 0) {
234                         goto done;
235                 }
236
237                 rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
238                 if (rc < 0) {
239                         goto done;
240                 }
241         }
242
243         rc = gxio_mpipe_rules_commit(&rules);
244         if (rc == 0) {
245                 memcpy(&context->channels[channel], config, sizeof(*config));
246         }
247
248 done:
249         rte_spinlock_unlock(&context->lock);
250
251         return rc;
252 }
253
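/* Map a byte count onto the table of hardware-supported sizes.  With
 * roundup, return the index of the smallest entry that holds at least
 * "size" bytes; otherwise return the index of the largest entry below
 * "size".  Returns -ENOENT if no suitable entry exists. */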
254 static int
255 mpipe_get_size_index(int *array, int count, int size,
256                      bool roundup)
257 {
258         int i, last = -1;
259
260         for (i = 0; i < count && array[i] < size; i++) {
261                 if (array[i])
262                         last = i;
263         }
264
265         if (roundup)
266                 return i < count ? (int)i : -ENOENT;
267         else
268                 return last >= 0 ? last : -ENOENT;
269 }
270
271 static int
272 mpipe_calc_size(int *array, int count, int size)
273 {
274         int index = mpipe_get_size_index(array, count, size, 1);
275         return index < 0 ? index : array[index];
276 }
277
278 static int mpipe_equeue_size(int size)
279 {
280         int result;
281         result = mpipe_calc_size(mpipe_equeue_sizes,
282                                  RTE_DIM(mpipe_equeue_sizes), size);
283         return result;
284 }
285
286 static int mpipe_iqueue_size(int size)
287 {
288         int result;
289         result = mpipe_calc_size(mpipe_iqueue_sizes,
290                                  RTE_DIM(mpipe_iqueue_sizes), size);
291         return result;
292 }
293
294 static int mpipe_buffer_size_index(int size)
295 {
296         int result;
297         result = mpipe_get_size_index(mpipe_buffer_sizes,
298                                       RTE_DIM(mpipe_buffer_sizes), size, 0);
299         return result;
300 }
301
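/* The link status structure fits in a single 64-bit word, so reads and
 * writes of dev->data->dev_link are done with one atomic cmpset. */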
302 static inline int
303 mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
304                                   struct rte_eth_link *link)
305 {
306         struct rte_eth_link *dst = link;
307         struct rte_eth_link *src = &(dev->data->dev_link);
308
309         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
310                                 *(uint64_t *)src) == 0)
311                 return -1;
312
313         return 0;
314 }
315
316 static inline int
317 mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
318                                    struct rte_eth_link *link)
319 {
320         struct rte_eth_link *dst = &(dev->data->dev_link);
321         struct rte_eth_link *src = link;
322
323         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
324                                 *(uint64_t *)src) == 0)
325                 return -1;
326
327         return 0;
328 }
329
330 static void
331 mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
332                 struct rte_eth_dev_info *dev_info)
333 {
334         dev_info->min_rx_bufsize  = 128;
335         dev_info->max_rx_pktlen   = 1518;
336         dev_info->max_tx_queues   = MPIPE_TX_MAX_QUEUES;
337         dev_info->max_rx_queues   = MPIPE_RX_MAX_QUEUES;
338         dev_info->max_mac_addrs   = 1;
339         dev_info->rx_offload_capa = 0;
340         dev_info->tx_offload_capa = 0;
341 }
342
343 static int
344 mpipe_configure(struct rte_eth_dev *dev)
345 {
346         struct mpipe_dev_priv *priv = mpipe_priv(dev);
347
348         if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
349                 RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
350                         mpipe_name(priv), dev->data->nb_tx_queues,
351                         MPIPE_TX_MAX_QUEUES);
352                 return -EINVAL;
353         }
354         priv->nb_tx_queues = dev->data->nb_tx_queues;
355
356         if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
357                 RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
358                         mpipe_name(priv), dev->data->nb_rx_queues,
359                         MPIPE_RX_MAX_QUEUES);
                return -EINVAL;
360         }
361         priv->nb_rx_queues = dev->data->nb_rx_queues;
362
363         return 0;
364 }
365
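/* Return -1 if the two link status structures are identical, 0 if they
 * differ. */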
366 static inline int
367 mpipe_link_compare(struct rte_eth_link *link1,
368                    struct rte_eth_link *link2)
369 {
370         return (*(uint64_t *)link1 == *(uint64_t *)link2)
371                 ? -1 : 0;
372 }
373
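/* Refresh the recorded link status from hardware.  With wait_to_complete
 * set, keep polling until the status differs from the previously stored
 * value or the retry budget is exhausted. */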
374 static int
375 mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
376 {
377         struct mpipe_dev_priv *priv = mpipe_priv(dev);
378         struct rte_eth_link old, new;
379         int64_t state, speed;
380         int count, rc;
381
382         memset(&old, 0, sizeof(old));
383         memset(&new, 0, sizeof(new));
384         mpipe_dev_atomic_read_link_status(dev, &old);
385
386         for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
387                 if (!priv->initialized)
388                         break;
389
390                 state = gxio_mpipe_link_get_attr(&priv->link,
391                                                  GXIO_MPIPE_LINK_CURRENT_STATE);
392                 if (state < 0)
393                         break;
394
395                 speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
396
397                 new.link_autoneg = (dev->data->dev_conf.link_speeds &
398                                 ETH_LINK_SPEED_AUTONEG);
399                 if (speed == GXIO_MPIPE_LINK_1G) {
400                         new.link_speed = ETH_SPEED_NUM_1G;
401                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
402                         new.link_status = ETH_LINK_UP;
403                 } else if (speed == GXIO_MPIPE_LINK_10G) {
404                         new.link_speed = ETH_SPEED_NUM_10G;
405                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
406                         new.link_status = ETH_LINK_UP;
407                 }
408
409                 rc = mpipe_link_compare(&old, &new);
410                 if (rc == 0 || !wait_to_complete)
411                         break;
412
413                 rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
414         }
415
416         mpipe_dev_atomic_write_link_status(dev, &new);
417         return rc;
418 }
419
420 static int
421 mpipe_set_link(struct rte_eth_dev *dev, int up)
422 {
423         struct mpipe_dev_priv *priv = mpipe_priv(dev);
424         int rc;
425
426         rc = gxio_mpipe_link_set_attr(&priv->link,
427                                       GXIO_MPIPE_LINK_DESIRED_STATE,
428                                       up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
429         if (rc < 0) {
430                 RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
431                         mpipe_name(priv), up ? "up" : "down");
432         } else {
433                 mpipe_link_update(dev, 0);
434         }
435
436         return rc;
437 }
438
439 static int
440 mpipe_set_link_up(struct rte_eth_dev *dev)
441 {
442         return mpipe_set_link(dev, 1);
443 }
444
445 static int
446 mpipe_set_link_down(struct rte_eth_dev *dev)
447 {
448         return mpipe_set_link(dev, 0);
449 }
450
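/* Datapath reference counting: the rx/tx burst handlers bracket their
 * work with dp_enter()/dp_exit(), and mpipe_stop() uses dp_wait() to let
 * in-flight bursts drain after the queue link_status flags are cleared. */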
451 static inline void
452 mpipe_dp_enter(struct mpipe_dev_priv *priv)
453 {
454         __insn_mtspr(SPR_DSTREAM_PF, 0);
455         rte_atomic32_inc(&priv->dp_count);
456 }
457
458 static inline void
459 mpipe_dp_exit(struct mpipe_dev_priv *priv)
460 {
461         rte_atomic32_dec(&priv->dp_count);
462 }
463
464 static inline void
465 mpipe_dp_wait(struct mpipe_dev_priv *priv)
466 {
467         while (rte_atomic32_read(&priv->dp_count) != 0) {
468                 rte_pause();
469         }
470 }
471
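/* Pick the buffer stack an mbuf should be returned to: the stack of its
 * input port when the port index is valid, otherwise this device's own
 * stack. */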
472 static inline int
473 mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
474 {
475         return (mbuf->port < RTE_MAX_ETHPORTS) ?
476                 mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
477                 priv->stack;
478 }
479
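/* Recover the mbuf that owns a received buffer by stepping back
 * rx_offset bytes from the buffer VA, then fill in the length, input
 * port and RSS hash from the ingress descriptor. */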
480 static inline struct rte_mbuf *
481 mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
482                 int in_port)
483 {
484         void *va = gxio_mpipe_idesc_get_va(idesc);
485         uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
486         struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);
487
488         rte_pktmbuf_reset(mbuf);
489         mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
490         mbuf->port     = in_port;
491         mbuf->data_len = size;
492         mbuf->pkt_len  = size;
493         mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);
494
495         PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
496                      mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);
497
498         return mbuf;
499 }
500
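/* Hand an mbuf's data buffer to the hardware buffer stack for reuse by
 * the ingress path. */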
501 static inline void
502 mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
503 {
504         const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
505         void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);
506
507         gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
508         PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
509                      mpipe_name(priv), mbuf, buf_addr, priv->stack);
510 }
511
512 static inline void
513 mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
514 {
515         struct rte_mbuf *mbuf;
516         int i;
517
518         for (i = 0; i < count; i++) {
519                 mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
520                 if (!mbuf)
521                         break;
522                 mpipe_recv_push(priv, mbuf);
523         }
524
525         PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
526 }
527
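/* Pop every buffer still sitting on the hardware buffer stack, rebuild
 * the owning mbuf header and return it to the mempool. */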
528 static inline void
529 mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
530 {
531         const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
532         uint8_t in_port = priv->port_id;
533         struct rte_mbuf *mbuf;
534         void *va;
535
536         while (1) {
537                 va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
538                 if (!va)
539                         break;
540                 mbuf = RTE_PTR_SUB(va, offset);
541
542                 PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
543                              mpipe_name(priv), mbuf, va);
544
545                 mbuf->data_off    = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
546                 mbuf->refcnt      = 1;
547                 mbuf->nb_segs     = 1;
548                 mbuf->port        = in_port;
549                 mbuf->packet_type = 0;
550                 mbuf->data_len    = 0;
551                 mbuf->pkt_len     = 0;
552
553                 __rte_mbuf_raw_free(mbuf);
554         }
555 }
556
557 static void
558 mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
559 {
560         size_t size = ms->hugepage_sz;
561         uint8_t *addr, *end;
562         int rc;
563
564         for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
565                 rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
566                                               size, 0);
567                 if (rc < 0)
568                         break;
569         }
570
571         if (rc < 0) {
572                 RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
573                         mpipe_name(priv), ms->addr, rc);
574         } else {
575                 RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
576                         mpipe_name(priv), ms->addr,
577                         RTE_PTR_ADD(ms->addr, ms->len - 1));
578         }
579 }
580
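/* One-time ingress setup: allocate NotifRings, a NotifGroup, buckets and
 * a buffer stack, register all hugepage segments with mPIPE, and
 * initialize the buffer stack memory. */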
581 static int
582 mpipe_recv_init(struct mpipe_dev_priv *priv)
583 {
584         const struct rte_memseg *seg = rte_eal_get_physmem_layout();
585         size_t stack_size;
586         void *stack_mem;
587         int rc;
588
589         if (!priv->rx_mpool) {
590                 RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
591                         mpipe_name(priv));
592                 return -ENODEV;
593         }
594
595         /* Allocate one NotifRing for each queue. */
596         rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
597                                           0, 0);
598         if (rc < 0) {
599                 RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
600                         mpipe_name(priv));
601                 return rc;
602         }
603         priv->first_ring = rc;
604
605         /* Allocate a NotifGroup. */
606         rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
607         if (rc < 0) {
608                 RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
609                         mpipe_name(priv));
610                 return rc;
611         }
612         priv->notif_group = rc;
613
614         /* Allocate required buckets. */
615         rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
616         if (rc < 0) {
617                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
618                         mpipe_name(priv));
619                 return rc;
620         }
621         priv->first_bucket = rc;
622
623         rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
624         if (rc < 0) {
625                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
626                         mpipe_name(priv));
627                 return rc;
628         }
629         priv->stack = rc;
630
631         while (seg && seg->addr)
632                 mpipe_register_segment(priv, seg++);
633
634         stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
635         stack_mem = rte_zmalloc(NULL, stack_size, 65536);
636         if (!stack_mem) {
637                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
638                         mpipe_name(priv));
639                 return -ENOMEM;
640         } else {
641                 RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
642                         mpipe_name(priv), stack_mem,
643                         RTE_PTR_ADD(stack_mem, stack_size - 1));
644         }
645
646         rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
647                                           priv->rx_size_code, stack_mem,
648                                           stack_size, 0);
649         if (rc < 0) {
650                 RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
651                         mpipe_name(priv));
652                 return rc;
653         }
654
655         return 0;
656 }
657
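/* One-time egress setup: allocate an eDMA ring, size the equeue, and
 * allocate the completion array and descriptor ring memory before
 * initializing the equeue. */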
658 static int
659 mpipe_xmit_init(struct mpipe_dev_priv *priv)
660 {
661         size_t ring_size;
662         void *ring_mem;
663         int rc;
664
665         /* Allocate eDMA ring. */
666         rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
667         if (rc < 0) {
668                 RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
669                         mpipe_name(priv));
670                 return rc;
671         }
672         priv->ering = rc;
673
674         rc = mpipe_equeue_size(MPIPE_TX_DESCS);
675         if (rc < 0) {
676                 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
677                         mpipe_name(priv), (int)MPIPE_TX_DESCS);
678                 return -ENOMEM;
679         }
680         priv->equeue_size = rc;
681
682         /* Initialize completion array. */
683         ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
684         priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
685         if (!priv->tx_comps) {
686                 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
687                         mpipe_name(priv));
688                 return -ENOMEM;
689         }
690
691         /* Allocate eDMA ring memory. */
692         ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
693         ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
694         if (!ring_mem) {
695                 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
696                         mpipe_name(priv));
697                 return -ENOMEM;
698         } else {
699                 RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
700                         mpipe_name(priv), ring_mem,
701                         RTE_PTR_ADD(ring_mem, ring_size - 1));
702         }
703
704         /* Initialize eDMA ring. */
705         rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
706                                     priv->channel, ring_mem, ring_size, 0);
707         if (rc < 0) {
708                 RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
709                         mpipe_name(priv));
710                 return rc;
711         }
712
713         return 0;
714 }
715
716 static int
717 mpipe_link_init(struct mpipe_dev_priv *priv)
718 {
719         int rc;
720
721         /* Open the link. */
722         rc = gxio_mpipe_link_open(&priv->link, priv->context,
723                                   mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
724         if (rc < 0) {
725                 RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
726                         mpipe_name(priv));
727                 return rc;
728         }
729
730         /* Get the channel index. */
731         rc = gxio_mpipe_link_channel(&priv->link);
732         if (rc < 0) {
733                 RTE_LOG(ERR, PMD, "%s: Bad channel\n",
734                         mpipe_name(priv));
735                 return rc;
736         }
737         priv->channel = rc;
738
739         return 0;
740 }
741
742 static int
743 mpipe_init(struct mpipe_dev_priv *priv)
744 {
745         int rc;
746
747         if (priv->initialized)
748                 return 0;
749
750         rc = mpipe_recv_init(priv);
751         if (rc < 0) {
752                 RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
753                         mpipe_name(priv));
754                 return rc;
755         }
756
757         rc = mpipe_xmit_init(priv);
758         if (rc < 0) {
759                 RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
760                         mpipe_name(priv));
761                 rte_free(priv);
762                 return rc;
763         }
764
765         priv->initialized = 1;
766
767         return 0;
768 }
769
770 static int
771 mpipe_start(struct rte_eth_dev *dev)
772 {
773         struct mpipe_dev_priv *priv = mpipe_priv(dev);
774         struct mpipe_channel_config config;
775         struct mpipe_rx_queue *rx_queue;
776         struct rte_eth_link eth_link;
777         unsigned queue, buffers = 0;
778         size_t ring_size;
779         void *ring_mem;
780         int rc;
781
782         memset(&eth_link, 0, sizeof(eth_link));
783         mpipe_dev_atomic_write_link_status(dev, &eth_link);
784
785         rc = mpipe_init(priv);
786         if (rc < 0)
787                 return rc;
788
789         /* Initialize NotifRings. */
790         for (queue = 0; queue < priv->nb_rx_queues; queue++) {
791                 rx_queue = mpipe_rx_queue(priv, queue);
792                 ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);
793
794                 ring_mem = rte_malloc(NULL, ring_size, ring_size);
795                 if (!ring_mem) {
796                         RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
797                                 mpipe_name(priv));
798                         return -ENOMEM;
799                 } else {
800                         RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
801                                 mpipe_name(priv), queue, ring_mem,
802                                 RTE_PTR_ADD(ring_mem, ring_size - 1));
803                 }
804
805                 rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
806                                             priv->first_ring + queue, ring_mem,
807                                             ring_size, 0);
808                 if (rc < 0) {
809                         RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
810                                 mpipe_name(priv));
811                         return rc;
812                 }
813
814                 rx_queue->rx_ring_mem = ring_mem;
815                 buffers += rx_queue->q.nb_desc;
816         }
817
818         /* Initialize ingress NotifGroup and buckets. */
819         rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
820                         priv->notif_group, priv->first_ring, priv->nb_rx_queues,
821                         priv->first_bucket, MPIPE_RX_BUCKETS,
822                         GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
823         if (rc < 0) {
824                 RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
825                         mpipe_name(priv));
826                 return rc;
827         }
828
829         /* Configure the classifier to deliver packets from this port. */
830         config.enable = 1;
831         config.first_bucket = priv->first_bucket;
832         config.num_buckets = MPIPE_RX_BUCKETS;
833         memset(&config.stacks, 0xff, sizeof(config.stacks));
834         config.stacks.stacks[priv->rx_size_code] = priv->stack;
835         config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;
836
837         rc = mpipe_channel_config(priv->instance, priv->channel,
838                                   &config);
839         if (rc < 0) {
840                 RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
841                         mpipe_name(priv));
842                 return rc;
843         }
844
845         /* Fill empty buffers into the buffer stack. */
846         mpipe_recv_fill_stack(priv, buffers);
847
848         /* Bring up the link. */
849         mpipe_set_link_up(dev);
850
851         /* Start xmit/recv on queues. */
852         for (queue = 0; queue < priv->nb_tx_queues; queue++)
853                 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
854         for (queue = 0; queue < priv->nb_rx_queues; queue++)
855                 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
856         priv->running = 1;
857
858         return 0;
859 }
860
861 static void
862 mpipe_stop(struct rte_eth_dev *dev)
863 {
864         struct mpipe_dev_priv *priv = mpipe_priv(dev);
865         struct mpipe_channel_config config;
866         unsigned queue;
867         int rc;
868
869         for (queue = 0; queue < priv->nb_tx_queues; queue++)
870                 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
871         for (queue = 0; queue < priv->nb_rx_queues; queue++)
872                 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
873
874         /* Make sure the link_status writes land. */
875         rte_wmb();
876
877         /*
878          * Wait for link_status change to register with straggling datapath
879          * threads.
880          */
881         mpipe_dp_wait(priv);
882
883         /* Bring down the link. */
884         mpipe_set_link_down(dev);
885
886         /* Remove classifier rules. */
887         memset(&config, 0, sizeof(config));
888         rc = mpipe_channel_config(priv->instance, priv->channel,
889                                   &config);
890         if (rc < 0) {
891                 RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
892                         mpipe_name(priv));
893         }
894
895         /* Flush completed xmit packets. */
896         mpipe_xmit_flush(priv);
897
898         /* Flush buffer stacks. */
899         mpipe_recv_flush(priv);
900
901         priv->running = 0;
902 }
903
904 static void
905 mpipe_close(struct rte_eth_dev *dev)
906 {
907         struct mpipe_dev_priv *priv = mpipe_priv(dev);
908         if (priv->running)
909                 mpipe_stop(dev);
910 }
911
912 static void
913 mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
914 {
915         struct mpipe_dev_priv *priv = mpipe_priv(dev);
916         struct mpipe_tx_queue *tx_queue;
917         struct mpipe_rx_queue *rx_queue;
918         unsigned i;
919         uint16_t idx;
920
921         memset(stats, 0, sizeof(*stats));
922
923         for (i = 0; i < priv->nb_tx_queues; i++) {
924                 tx_queue = mpipe_tx_queue(priv, i);
925
926                 stats->opackets += tx_queue->q.stats.packets;
927                 stats->obytes   += tx_queue->q.stats.bytes;
928                 stats->oerrors  += tx_queue->q.stats.errors;
929
930                 idx = tx_queue->q.stat_idx;
931                 if (idx != (uint16_t)-1) {
932                         stats->q_opackets[idx] += tx_queue->q.stats.packets;
933                         stats->q_obytes[idx]   += tx_queue->q.stats.bytes;
934                         stats->q_errors[idx]   += tx_queue->q.stats.errors;
935                 }
936         }
937
938         for (i = 0; i < priv->nb_rx_queues; i++) {
939                 rx_queue = mpipe_rx_queue(priv, i);
940
941                 stats->ipackets  += rx_queue->q.stats.packets;
942                 stats->ibytes    += rx_queue->q.stats.bytes;
943                 stats->ierrors   += rx_queue->q.stats.errors;
944                 stats->rx_nombuf += rx_queue->q.stats.nomem;
945
946                 idx = rx_queue->q.stat_idx;
947                 if (idx != (uint16_t)-1) {
948                         stats->q_ipackets[idx] += rx_queue->q.stats.packets;
949                         stats->q_ibytes[idx]   += rx_queue->q.stats.bytes;
950                         stats->q_errors[idx]   += rx_queue->q.stats.errors;
951                 }
952         }
953 }
954
955 static void
956 mpipe_stats_reset(struct rte_eth_dev *dev)
957 {
958         struct mpipe_dev_priv *priv = mpipe_priv(dev);
959         struct mpipe_tx_queue *tx_queue;
960         struct mpipe_rx_queue *rx_queue;
961         unsigned i;
962
963         for (i = 0; i < priv->nb_tx_queues; i++) {
964                 tx_queue = mpipe_tx_queue(priv, i);
965                 memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
966         }
967
968         for (i = 0; i < priv->nb_rx_queues; i++) {
969                 rx_queue = mpipe_rx_queue(priv, i);
970                 memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
971         }
972 }
973
974 static int
975 mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
976                               uint8_t stat_idx, uint8_t is_rx)
977 {
978         struct mpipe_dev_priv *priv = mpipe_priv(dev);
979
980         if (is_rx) {
981                 priv->rx_stat_mapping[stat_idx] = queue_id;
982         } else {
983                 priv->tx_stat_mapping[stat_idx] = queue_id;
984         }
985
986         return 0;
987 }
988
989 static int
990 mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
991                      uint16_t nb_desc, unsigned int socket_id __rte_unused,
992                      const struct rte_eth_txconf *tx_conf __rte_unused)
993 {
994         struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
995         struct mpipe_dev_priv *priv = mpipe_priv(dev);
996         uint16_t idx;
997
998         tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
999                                RTE_CACHE_LINE_SIZE);
1000         if (!tx_queue) {
1001                 RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
1002                         mpipe_name(priv));
1003                 return -ENOMEM;
1004         }
1005
1006         memset(&tx_queue->q, 0, sizeof(tx_queue->q));
1007         tx_queue->q.priv = priv;
1008         tx_queue->q.queue_idx = queue_idx;
1009         tx_queue->q.port_id = dev->data->port_id;
1010         tx_queue->q.nb_desc = nb_desc;
1011
1012         tx_queue->q.stat_idx = -1;
1013         for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1014                 if (priv->tx_stat_mapping[idx] == queue_idx)
1015                         tx_queue->q.stat_idx = idx;
1016         }
1017
1018         dev->data->tx_queues[queue_idx] = tx_queue;
1019
1020         return 0;
1021 }
1022
1023 static void
1024 mpipe_tx_queue_release(void *_txq)
1025 {
1026         rte_free(_txq);
1027 }
1028
1029 static int
1030 mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1031                      uint16_t nb_desc, unsigned int socket_id __rte_unused,
1032                      const struct rte_eth_rxconf *rx_conf __rte_unused,
1033                      struct rte_mempool *mp)
1034 {
1035         struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
1036         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1037         uint16_t idx;
1038         int size, rc;
1039
1040         rc = mpipe_iqueue_size(nb_desc);
1041         if (rc < 0) {
1042                 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
1043                         mpipe_name(priv), (int)nb_desc);
1044                 return -ENOMEM;
1045         }
1046
1047         if (rc != nb_desc) {
1048                 RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
1049                         mpipe_name(priv), (int)nb_desc, rc);
1050                 nb_desc = rc;
1051         }
1052
1053         size = sizeof(*rx_queue);
1054         rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
1055         if (!rx_queue) {
1056                 RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
1057                         mpipe_name(priv));
1058                 return -ENOMEM;
1059         }
1060
1061         memset(&rx_queue->q, 0, sizeof(rx_queue->q));
1062         rx_queue->q.priv = priv;
1063         rx_queue->q.nb_desc = nb_desc;
1064         rx_queue->q.port_id = dev->data->port_id;
1065         rx_queue->q.queue_idx = queue_idx;
1066
1067         if (!priv->rx_mpool) {
1068                 int size = (rte_pktmbuf_data_room_size(mp) -
1069                             RTE_PKTMBUF_HEADROOM -
1070                             MPIPE_RX_IP_ALIGN);
1071
1072                 priv->rx_offset = (sizeof(struct rte_mbuf) +
1073                                    rte_pktmbuf_priv_size(mp) +
1074                                    RTE_PKTMBUF_HEADROOM +
1075                                    MPIPE_RX_IP_ALIGN);
1076                 if (size < 0) {
1077                         RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
1078                                 mpipe_name(priv),
1079                                 rte_pktmbuf_data_room_size(mp));
1080                         return -ENOMEM;
1081                 }
1082
1083                 priv->rx_size_code = mpipe_buffer_size_index(size);
1084                 priv->rx_mpool = mp;
1085         }
1086
1087         if (priv->rx_mpool != mp) {
1088                 RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
1089                         mpipe_name(priv));
1090         }
1091
1092         rx_queue->q.stat_idx = -1;
1093         for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1094                 if (priv->rx_stat_mapping[idx] == queue_idx)
1095                         rx_queue->q.stat_idx = idx;
1096         }
1097
1098         dev->data->rx_queues[queue_idx] = rx_queue;
1099
1100         return 0;
1101 }
1102
1103 static void
1104 mpipe_rx_queue_release(void *_rxq)
1105 {
1106         rte_free(_rxq);
1107 }
1108
1109 #define MPIPE_XGBE_ENA_HASH_MULTI       \
1110         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
1111 #define MPIPE_XGBE_ENA_HASH_UNI         \
1112         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
1113 #define MPIPE_XGBE_COPY_ALL             \
1114         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
1115 #define MPIPE_GBE_ENA_MULTI_HASH        \
1116         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
1117 #define MPIPE_GBE_ENA_UNI_HASH          \
1118         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
1119 #define MPIPE_GBE_COPY_ALL              \
1120         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)
1121
1122 static void
1123 mpipe_promiscuous_enable(struct rte_eth_dev *dev)
1124 {
1125         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1126         int64_t reg;
1127         int addr;
1128
1129         if (priv->is_xaui) {
1130                 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1131                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1132                 reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
1133                 reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
1134                 reg |=  MPIPE_XGBE_COPY_ALL;
1135                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1136         } else {
1137                 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1138                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1139                 reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
1140                 reg &= ~MPIPE_GBE_ENA_UNI_HASH;
1141                 reg |=  MPIPE_GBE_COPY_ALL;
1142                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1143         }
1144 }
1145
1146 static void
1147 mpipe_promiscuous_disable(struct rte_eth_dev *dev)
1148 {
1149         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1150         int64_t reg;
1151         int addr;
1152
1153         if (priv->is_xaui) {
1154                 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1155                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1156                 reg |=  MPIPE_XGBE_ENA_HASH_MULTI;
1157                 reg |=  MPIPE_XGBE_ENA_HASH_UNI;
1158                 reg &= ~MPIPE_XGBE_COPY_ALL;
1159                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1160         } else {
1161                 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1162                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1163                 reg |=  MPIPE_GBE_ENA_MULTI_HASH;
1164                 reg |=  MPIPE_GBE_ENA_UNI_HASH;
1165                 reg &= ~MPIPE_GBE_COPY_ALL;
1166                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1167         }
1168 }
1169
1170 static const struct eth_dev_ops mpipe_dev_ops = {
1171         .dev_infos_get           = mpipe_infos_get,
1172         .dev_configure           = mpipe_configure,
1173         .dev_start               = mpipe_start,
1174         .dev_stop                = mpipe_stop,
1175         .dev_close               = mpipe_close,
1176         .stats_get               = mpipe_stats_get,
1177         .stats_reset             = mpipe_stats_reset,
1178         .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
1179         .tx_queue_setup          = mpipe_tx_queue_setup,
1180         .rx_queue_setup          = mpipe_rx_queue_setup,
1181         .tx_queue_release        = mpipe_tx_queue_release,
1182         .rx_queue_release        = mpipe_rx_queue_release,
1183         .link_update             = mpipe_link_update,
1184         .dev_set_link_up         = mpipe_set_link_up,
1185         .dev_set_link_down       = mpipe_set_link_down,
1186         .promiscuous_enable      = mpipe_promiscuous_enable,
1187         .promiscuous_disable     = mpipe_promiscuous_disable,
1188 };
1189
1190 static inline void
1191 mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
1192 {
1193         gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
1194         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1195         int64_t slot;
1196
1197         for (slot = start; slot < end; slot++) {
1198                 gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
1199         }
1200 }
1201
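/* Drain the egress queue: post a single null descriptor, spin until it
 * completes, then free any mbufs still parked in the completion array. */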
1202 static void
1203 mpipe_xmit_flush(struct mpipe_dev_priv *priv)
1204 {
1205         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1206         int64_t slot;
1207
1208         /* Post a dummy descriptor and wait for its return. */
1209         slot = gxio_mpipe_equeue_reserve(equeue, 1);
1210         if (slot < 0) {
1211                 RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
1212                         mpipe_name(priv));
1213                 return;
1214         }
1215
1216         mpipe_xmit_null(priv, slot, slot + 1);
1217
1218         while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
1219                 rte_pause();
1220         }
1221
1222         for (slot = 0; slot < priv->equeue_size; slot++) {
1223                 if (priv->tx_comps[slot])
1224                         rte_pktmbuf_free_seg(priv->tx_comps[slot]);
1225         }
1226 }
1227
1228 static void
1229 mpipe_recv_flush(struct mpipe_dev_priv *priv)
1230 {
1231         uint8_t in_port = priv->port_id;
1232         struct mpipe_rx_queue *rx_queue;
1233         gxio_mpipe_iqueue_t *iqueue;
1234         gxio_mpipe_idesc_t idesc;
1235         struct rte_mbuf *mbuf;
1236         unsigned queue;
1237
1238         /* Release packets on the buffer stack. */
1239         mpipe_recv_flush_stack(priv);
1240
1241         /* Flush packets sitting in recv queues. */
1242         for (queue = 0; queue < priv->nb_rx_queues; queue++) {
1243                 rx_queue = mpipe_rx_queue(priv, queue);
1244                 iqueue = &rx_queue->iqueue;
1245                 while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
1246                         /* Skip idesc with the 'buffer error' bit set. */
1247                         if (idesc.be)
1248                                 continue;
1249                         mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
1250                         rte_pktmbuf_free(mbuf);
1251                 }
1252                 rte_free(rx_queue->rx_ring_mem);
1253         }
1254 }
1255
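/* Transmit burst: reserve a block of equeue slots, emit one descriptor
 * per packet segment, and recycle mbufs previously parked in the
 * completion slots.  Buffers are handed back to the hardware stack (hwb)
 * while a per-port push debt is outstanding; unused reserved slots are
 * padded with null descriptors. */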
1256 static inline uint16_t
1257 mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
1258               uint16_t nb_pkts)
1259 {
1260         struct mpipe_dev_priv *priv = tx_queue->q.priv;
1261         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1262         unsigned nb_bytes = 0;
1263         unsigned nb_sent = 0;
1264         int nb_slots, i;
1265         uint8_t port_id;
1266
1267         PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
1268                      nb_pkts, mpipe_name(tx_queue->q.priv),
1269                      tx_queue->q.queue_idx);
1270
1271         /* Optimistic assumption that we need exactly one slot per packet. */
1272         nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);
1273
1274         do {
1275                 struct rte_mbuf *mbuf = NULL, *pkt = NULL;
1276                 int64_t slot;
1277
1278                 /* Reserve eDMA ring slots. */
1279                 slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
1280                 if (unlikely(slot < 0)) {
1281                         break;
1282                 }
1283
1284                 for (i = 0; i < nb_slots; i++) {
1285                         unsigned idx = (slot + i) & (priv->equeue_size - 1);
1286                         rte_prefetch0(priv->tx_comps[idx]);
1287                 }
1288
1289                 /* Fill up slots with descriptor and completion info. */
1290                 for (i = 0; i < nb_slots; i++) {
1291                         unsigned idx = (slot + i) & (priv->equeue_size - 1);
1292                         gxio_mpipe_edesc_t desc;
1293                         struct rte_mbuf *next;
1294
1295                         /* Starting on a new packet? */
1296                         if (likely(!mbuf)) {
1297                                 int room = nb_slots - i;
1298
1299                                 pkt = mbuf = tx_pkts[nb_sent];
1300
1301                                 /* Bail out if we run out of descs. */
1302                                 if (unlikely(pkt->nb_segs > room))
1303                                         break;
1304
1305                                 nb_sent++;
1306                         }
1307
1308                         /* We have a segment to send. */
1309                         next = mbuf->next;
1310
1311                         if (priv->tx_comps[idx])
1312                                 rte_pktmbuf_free_seg(priv->tx_comps[idx]);
1313
1314                         port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
1315                                                 mbuf->port : priv->port_id;
1316                         desc = (gxio_mpipe_edesc_t) { {
1317                                 .va        = rte_pktmbuf_mtod(mbuf, uintptr_t),
1318                                 .xfer_size = rte_pktmbuf_data_len(mbuf),
1319                                 .bound     = next ? 0 : 1,
1320                                 .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
1321                                 .size      = priv->rx_size_code,
1322                         } };
1323                         if (mpipe_local.mbuf_push_debt[port_id] > 0) {
1324                                 mpipe_local.mbuf_push_debt[port_id]--;
1325                                 desc.hwb = 1;
1326                                 priv->tx_comps[idx] = NULL;
1327                         } else
1328                                 priv->tx_comps[idx] = mbuf;
1329
1330                         nb_bytes += mbuf->data_len;
1331                         gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
1332
1333                         PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
1334                                      mpipe_name(priv),
1335                                      tx_queue->q.queue_idx,
1336                                      rte_pktmbuf_mtod(mbuf, void *),
1337                                      rte_pktmbuf_data_len(mbuf));
1338
1339                         mbuf = next;
1340                 }
1341
1342                 if (unlikely(nb_sent < nb_pkts)) {
1343
1344                         /* Fill remaining slots with null descriptors. */
1345                         mpipe_xmit_null(priv, slot + i, slot + nb_slots);
1346
1347                         /*
1348                          * Calculate exact number of descriptors needed for
1349                          * the next go around.
1350                          */
1351                         nb_slots = 0;
1352                         for (i = nb_sent; i < nb_pkts; i++) {
1353                                 nb_slots += tx_pkts[i]->nb_segs;
1354                         }
1355
1356                         nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
1357                 }
1358         } while (nb_sent < nb_pkts);
1359
1360         tx_queue->q.stats.packets += nb_sent;
1361         tx_queue->q.stats.bytes   += nb_bytes;
1362
1363         return nb_sent;
1364 }
1365
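/* Receive burst: peek a block of ingress descriptors, convert each good
 * one into an mbuf, and replenish the buffer stack either by pushing a
 * fresh mbuf or, below the debt threshold, by deferring the refill via
 * the per-port push debt.  The ring is advanced and credited in bursts. */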
1366 static inline uint16_t
1367 mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
1368               uint16_t nb_pkts)
1369 {
1370         struct mpipe_dev_priv *priv = rx_queue->q.priv;
1371         gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
1372         gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
1373         uint8_t in_port = rx_queue->q.port_id;
1374         const unsigned look_ahead = 8;
1375         int room = nb_pkts, rc = 0;
1376         unsigned nb_packets = 0;
1377         unsigned nb_dropped = 0;
1378         unsigned nb_nomem = 0;
1379         unsigned nb_bytes = 0;
1380         unsigned nb_descs, i;
1381
1382         while (room && !rc) {
1383                 if (rx_queue->avail_descs < room) {
1384                         rc = gxio_mpipe_iqueue_try_peek(iqueue,
1385                                                         &rx_queue->next_desc);
1386                         rx_queue->avail_descs = rc < 0 ? 0 : rc;
1387                 }
1388
1389                 if (unlikely(!rx_queue->avail_descs)) {
1390                         break;
1391                 }
1392
1393                 nb_descs = RTE_MIN(room, rx_queue->avail_descs);
1394
1395                 first_idesc = rx_queue->next_desc;
1396                 last_idesc  = first_idesc + nb_descs;
1397
1398                 rx_queue->next_desc   += nb_descs;
1399                 rx_queue->avail_descs -= nb_descs;
1400
1401                 for (i = 1; i < look_ahead; i++) {
1402                         rte_prefetch0(first_idesc + i);
1403                 }
1404
1405                 PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
1406                              mpipe_name(rx_queue->q.priv),
1407                              rx_queue->q.queue_idx,
1408                              nb_descs);
1409
1410                 for (idesc = first_idesc; idesc < last_idesc; idesc++) {
1411                         struct rte_mbuf *mbuf;
1412
1413                         PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
1414                                      mpipe_name(priv),
1415                                      rx_queue->q.queue_idx,
1416                                      nb_packets, nb_descs);
1417
1418                         rte_prefetch0(idesc + look_ahead);
1419
1420                         PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
1421                                      "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
1422                                      mpipe_name(priv),
1423                                      rx_queue->q.queue_idx,
1424                                      idesc,
1425                                      idesc->me ? "me, " : "",
1426                                      idesc->tr ? "tr, " : "",
1427                                      idesc->ce ? "ce, " : "",
1428                                      idesc->ct ? "ct, " : "",
1429                                      idesc->cs ? "cs, " : "",
1430                                      idesc->nr ? "nr, " : "",
1431                                      idesc->sq ? "sq, " : "",
1432                                      idesc->ts ? "ts, " : "",
1433                                      idesc->ps ? "ps, " : "",
1434                                      idesc->be ? "be, " : "",
1435                                      idesc->l2_size,
1436                                      idesc->bucket_id,
1437                                      idesc->channel,
1438                                      idesc->notif_ring,
1439                                      (unsigned long)idesc->packet_sqn,
1440                                      (unsigned long)idesc->va);
1441
1442                         if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
1443                                 nb_dropped++;
1444                                 gxio_mpipe_iqueue_drop(iqueue, idesc);
1445                                 PMD_DEBUG_RX("%s:%d: Descriptor error\n",
1446                                              mpipe_name(rx_queue->q.priv),
1447                                              rx_queue->q.queue_idx);
1448                                 continue;
1449                         }
1450
                        if (mpipe_local.mbuf_push_debt[in_port] <
                                        MPIPE_BUF_DEBT_THRESHOLD)
                                mpipe_local.mbuf_push_debt[in_port]++;
                        else {
                                mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
                                if (unlikely(!mbuf)) {
                                        nb_nomem++;
                                        gxio_mpipe_iqueue_drop(iqueue, idesc);
                                        PMD_DEBUG_RX("%s:%d: alloc failure\n",
                                             mpipe_name(rx_queue->q.priv),
                                             rx_queue->q.queue_idx);
                                        continue;
                                }

                                mpipe_recv_push(priv, mbuf);
                        }

                        /* Get and setup the mbuf for the received packet. */
                        mbuf = mpipe_recv_mbuf(priv, idesc, in_port);

                        /* Update results and statistics counters. */
                        rx_pkts[nb_packets] = mbuf;
                        nb_bytes += mbuf->pkt_len;
                        nb_packets++;
                }

                /*
                 * We release the ring in bursts, but do not track and release
                 * buckets.  This therefore breaks dynamic flow affinity, but
                 * we always operate in static affinity mode, and so we're OK
                 * with this optimization.
                 */
                gxio_mpipe_iqueue_advance(iqueue, nb_descs);
                gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);

                /*
                 * Go around once more if we haven't yet peeked the queue, and
                 * if we have more room to receive.
                 */
                room = nb_pkts - nb_packets;
        }

        rx_queue->q.stats.packets += nb_packets;
        rx_queue->q.stats.bytes   += nb_bytes;
        rx_queue->q.stats.errors  += nb_dropped;
        rx_queue->q.stats.nomem   += nb_nomem;

        PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
                     mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
                     nb_packets, nb_bytes, nb_dropped, nb_nomem);

        return nb_packets;
}

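/*
 * Burst receive handler registered as eth_dev->rx_pkt_burst.  Packets are
 * only pulled from the hardware when the queue has been set up and the link
 * is reported up.
 */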
static uint16_t
mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct mpipe_rx_queue *rx_queue = _rxq;
        uint16_t result = 0;

        if (rx_queue) {
                mpipe_dp_enter(rx_queue->q.priv);
                if (likely(rx_queue->q.link_status))
                        result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
                mpipe_dp_exit(rx_queue->q.priv);
        }

        return result;
}

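/*
 * Burst transmit handler registered as eth_dev->tx_pkt_burst; mirrors the
 * receive path and transmits only while the link is up.
 */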
static uint16_t
mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct mpipe_tx_queue *tx_queue = _txq;
        uint16_t result = 0;

        if (tx_queue) {
                mpipe_dp_enter(tx_queue->q.priv);
                if (likely(tx_queue->q.link_status))
                        result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
                mpipe_dp_exit(tx_queue->q.priv);
        }

        return result;
}

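/*
 * Enumerate the mPIPE links until the requested interface name is found,
 * returning its MAC address through 'mac'.
 */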
static int
mpipe_link_mac(const char *ifname, uint8_t *mac)
{
        int rc, idx;
        char name[GXIO_MPIPE_LINK_NAME_LEN];

        for (idx = 0, rc = 0; !rc; idx++) {
                rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
                if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
                        return 0;
        }
        return -ENODEV;
}

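/*
 * Virtual device probe: resolve the link name to an mPIPE instance,
 * allocate the per-device private data and ethdev, and wire up the device
 * operations and burst handlers.
 */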
static int
rte_pmd_mpipe_probe(const char *ifname,
                      const char *params __rte_unused)
{
        gxio_mpipe_context_t *context;
        struct rte_eth_dev *eth_dev;
        struct mpipe_dev_priv *priv;
        int instance, rc;
        uint8_t *mac;

        /* Get the mPIPE instance that the device belongs to. */
        instance = gxio_mpipe_link_instance(ifname);
        context = mpipe_context(instance);
        if (!context) {
                RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
                return -ENODEV;
        }

        priv = rte_zmalloc(NULL, sizeof(*priv), 0);
        if (!priv) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
                return -ENOMEM;
        }

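        /*
         * Initialize the statistics counter mappings to an "unassigned"
         * sentinel (all 0xff) and record the basic link attributes; "xgbe"
         * names denote XAUI ports.
         */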
        memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
        memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
        priv->context = context;
        priv->instance = instance;
        priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
        priv->channel = -1;

        mac = priv->mac_addr.addr_bytes;
        rc = mpipe_link_mac(ifname, mac);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
                rte_free(priv);
                return -ENODEV;
        }

        eth_dev = rte_eth_dev_allocate(ifname);
        if (!eth_dev) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
                rte_free(priv);
                return -ENOMEM;
        }

        RTE_LOG(INFO, PMD, "%s: Initialized mpipe device "
                "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
                ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

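        /* Attach the private data and handlers to the newly allocated ethdev. */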
        priv->eth_dev = eth_dev;
        priv->port_id = eth_dev->data->port_id;
        eth_dev->data->dev_private = priv;
        eth_dev->data->mac_addrs = &priv->mac_addr;

        eth_dev->data->dev_flags = 0;
        eth_dev->data->kdrv = RTE_KDRV_NONE;
        eth_dev->driver = NULL;
        eth_dev->data->drv_name = drivername;
        eth_dev->data->numa_node = instance;

        eth_dev->dev_ops      = &mpipe_dev_ops;
        eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
        eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;

        rc = mpipe_link_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
                        mpipe_name(priv));
                return rc;
        }

        return 0;
}

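/*
 * Two virtual device drivers share the same probe routine; the "xgbe" and
 * "gbe" link names are told apart inside the probe itself.
 */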
static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
        .probe = rte_pmd_mpipe_probe,
};

static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
        .probe = rte_pmd_mpipe_probe,
};

RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);

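/*
 * Library constructor: open a gxio context for each mPIPE instance present
 * at load time, stopping at the first instance that fails to initialize and
 * recording the number of usable instances.
 */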
static void __attribute__((constructor, used))
mpipe_init_contexts(void)
{
        struct mpipe_context *context;
        int rc, instance;

        for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
                context = &mpipe_contexts[instance];

                rte_spinlock_init(&context->lock);
                rc = gxio_mpipe_init(&context->context, instance);
                if (rc < 0)
                        break;
        }

        mpipe_instances = instance;
}