[deb_dpdk.git] / drivers / net / ring / rte_eth_ring.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include "rte_eth_ring.h"
6 #include <rte_mbuf.h>
7 #include <rte_ethdev_driver.h>
8 #include <rte_malloc.h>
9 #include <rte_memcpy.h>
10 #include <rte_string_fns.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
13 #include <rte_errno.h>
14
15 #define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
16 #define ETH_RING_ACTION_CREATE          "CREATE"
17 #define ETH_RING_ACTION_ATTACH          "ATTACH"
18 #define ETH_RING_INTERNAL_ARG           "internal"
19
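/*
 * Illustrative vdev usage (a sketch; the device and ring names are examples
 * only):
 *
 *   --vdev=net_ring0
 *   --vdev=net_ring1,nodeaction=r1:0:CREATE
 *
 * The first form creates a port backed by newly created rings on the probing
 * lcore's socket; the second drives the "nodeaction" handling below, where
 * the value is <name>:<numa node>:<CREATE|ATTACH>.
 */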
20 static const char *valid_arguments[] = {
21         ETH_RING_NUMA_NODE_ACTION_ARG,
22         ETH_RING_INTERNAL_ARG,
23         NULL
24 };
25
26 struct ring_internal_args {
27         struct rte_ring * const *rx_queues;
28         const unsigned int nb_rx_queues;
29         struct rte_ring * const *tx_queues;
30         const unsigned int nb_tx_queues;
31         const unsigned int numa_node;
32         void *addr; /* self addr for sanity check */
33 };
34
35 enum dev_action {
36         DEV_CREATE,
37         DEV_ATTACH
38 };
39
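/*
 * Per-queue state: the backing ring plus packet and error counters that are
 * updated by the burst functions and reported through eth_stats_get().
 */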
40 struct ring_queue {
41         struct rte_ring *rng;
42         rte_atomic64_t rx_pkts;
43         rte_atomic64_t tx_pkts;
44         rte_atomic64_t err_pkts;
45 };
46
47 struct pmd_internals {
48         unsigned max_rx_queues;
49         unsigned max_tx_queues;
50
51         struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
52         struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
53
54         struct ether_addr address;
55         enum dev_action action;
56 };
57
58
59 static struct rte_eth_link pmd_link = {
60                 .link_speed = ETH_SPEED_NUM_10G,
61                 .link_duplex = ETH_LINK_FULL_DUPLEX,
62                 .link_status = ETH_LINK_DOWN,
63                 .link_autoneg = ETH_LINK_FIXED,
64 };
65
66 static int eth_ring_logtype;
67
68 #define PMD_LOG(level, fmt, args...) \
69         rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
70                 "%s(): " fmt "\n", __func__, ##args)
71
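/*
 * RX/TX burst handlers: packets are moved with a single ring dequeue/enqueue
 * burst and the mbuf pointers are passed through untouched.  When the ring is
 * single-consumer (RX) or single-producer (TX) the counters are updated
 * without atomics; otherwise an atomic add is used, since multiple lcores may
 * service the same queue.  On TX, mbufs the ring could not absorb are counted
 * as errors and remain owned by the caller.
 */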
72 static uint16_t
73 eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
74 {
75         void **ptrs = (void *)&bufs[0];
76         struct ring_queue *r = q;
77         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
78                         ptrs, nb_bufs, NULL);
79         if (r->rng->flags & RING_F_SC_DEQ)
80                 r->rx_pkts.cnt += nb_rx;
81         else
82                 rte_atomic64_add(&(r->rx_pkts), nb_rx);
83         return nb_rx;
84 }
85
86 static uint16_t
87 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
88 {
89         void **ptrs = (void *)&bufs[0];
90         struct ring_queue *r = q;
91         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
92                         ptrs, nb_bufs, NULL);
93         if (r->rng->flags & RING_F_SP_ENQ) {
94                 r->tx_pkts.cnt += nb_tx;
95                 r->err_pkts.cnt += nb_bufs - nb_tx;
96         } else {
97                 rte_atomic64_add(&(r->tx_pkts), nb_tx);
98                 rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
99         }
100         return nb_tx;
101 }
102
103 static int
104 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
105
106 static int
107 eth_dev_start(struct rte_eth_dev *dev)
108 {
109         dev->data->dev_link.link_status = ETH_LINK_UP;
110         return 0;
111 }
112
113 static void
114 eth_dev_stop(struct rte_eth_dev *dev)
115 {
116         dev->data->dev_link.link_status = ETH_LINK_DOWN;
117 }
118
119 static int
120 eth_dev_set_link_down(struct rte_eth_dev *dev)
121 {
122         dev->data->dev_link.link_status = ETH_LINK_DOWN;
123         return 0;
124 }
125
126 static int
127 eth_dev_set_link_up(struct rte_eth_dev *dev)
128 {
129         dev->data->dev_link.link_status = ETH_LINK_UP;
130         return 0;
131 }
132
133 static int
134 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
135                                     uint16_t nb_rx_desc __rte_unused,
136                                     unsigned int socket_id __rte_unused,
137                                     const struct rte_eth_rxconf *rx_conf __rte_unused,
138                                     struct rte_mempool *mb_pool __rte_unused)
139 {
140         struct pmd_internals *internals = dev->data->dev_private;
141         dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
142         return 0;
143 }
144
145 static int
146 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
147                                     uint16_t nb_tx_desc __rte_unused,
148                                     unsigned int socket_id __rte_unused,
149                                     const struct rte_eth_txconf *tx_conf __rte_unused)
150 {
151         struct pmd_internals *internals = dev->data->dev_private;
152         dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
153         return 0;
154 }
155
156
157 static void
158 eth_dev_info(struct rte_eth_dev *dev,
159                 struct rte_eth_dev_info *dev_info)
160 {
161         struct pmd_internals *internals = dev->data->dev_private;
162         dev_info->max_mac_addrs = 1;
163         dev_info->max_rx_pktlen = (uint32_t)-1;
164         dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
165         dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
166         dev_info->min_rx_bufsize = 0;
167         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
168 }
169
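/*
 * Port statistics are plain sums of the per-queue counters.  Only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported (and summed), matching the
 * size of the per-queue arrays in struct rte_eth_stats.
 */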
170 static int
171 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
172 {
173         unsigned i;
174         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
175         const struct pmd_internals *internal = dev->data->dev_private;
176
177         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
178                         i < dev->data->nb_rx_queues; i++) {
179                 stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
180                 rx_total += stats->q_ipackets[i];
181         }
182
183         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
184                         i < dev->data->nb_tx_queues; i++) {
185                 stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
186                 stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
187                 tx_total += stats->q_opackets[i];
188                 tx_err_total += stats->q_errors[i];
189         }
190
191         stats->ipackets = rx_total;
192         stats->opackets = tx_total;
193         stats->oerrors = tx_err_total;
194
195         return 0;
196 }
197
198 static void
199 eth_stats_reset(struct rte_eth_dev *dev)
200 {
201         unsigned i;
202         struct pmd_internals *internal = dev->data->dev_private;
203         for (i = 0; i < dev->data->nb_rx_queues; i++)
204                 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
205         for (i = 0; i < dev->data->nb_tx_queues; i++) {
206                 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
207                 internal->tx_ring_queues[i].err_pkts.cnt = 0;
208         }
209 }
210
211 static void
212 eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
213         uint32_t index __rte_unused)
214 {
215 }
216
217 static int
218 eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
219         struct ether_addr *mac_addr __rte_unused,
220         uint32_t index __rte_unused,
221         uint32_t vmdq __rte_unused)
222 {
223         return 0;
224 }
225
226 static void
227 eth_queue_release(void *q __rte_unused) { ; }
228 static int
229 eth_link_update(struct rte_eth_dev *dev __rte_unused,
230                 int wait_to_complete __rte_unused) { return 0; }
231
232 static const struct eth_dev_ops ops = {
233         .dev_start = eth_dev_start,
234         .dev_stop = eth_dev_stop,
235         .dev_set_link_up = eth_dev_set_link_up,
236         .dev_set_link_down = eth_dev_set_link_down,
237         .dev_configure = eth_dev_configure,
238         .dev_infos_get = eth_dev_info,
239         .rx_queue_setup = eth_rx_queue_setup,
240         .tx_queue_setup = eth_tx_queue_setup,
241         .rx_queue_release = eth_queue_release,
242         .tx_queue_release = eth_queue_release,
243         .link_update = eth_link_update,
244         .stats_get = eth_stats_get,
245         .stats_reset = eth_stats_reset,
246         .mac_addr_remove = eth_mac_addr_remove,
247         .mac_addr_add = eth_mac_addr_add,
248 };
249
250 static struct rte_vdev_driver pmd_ring_drv;
251
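/*
 * Common back end for the vdev probe path and rte_eth_from_rings(): allocate
 * the ethdev, wire each supplied ring into a ring_queue slot and publish the
 * port.  Returns the new port id on success, or -1 with rte_errno set to
 * ENOMEM or ENOSPC on failure.
 */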
252 static int
253 do_eth_dev_ring_create(const char *name,
254                 struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
255                 struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
256                 const unsigned int numa_node, enum dev_action action,
257                 struct rte_eth_dev **eth_dev_p)
258 {
259         struct rte_eth_dev_data *data = NULL;
260         struct pmd_internals *internals = NULL;
261         struct rte_eth_dev *eth_dev = NULL;
262         void **rx_queues_local = NULL;
263         void **tx_queues_local = NULL;
264         unsigned i;
265
266         PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
267                         numa_node);
268
269         rx_queues_local = rte_zmalloc_socket(name,
270                         sizeof(void *) * nb_rx_queues, 0, numa_node);
271         if (rx_queues_local == NULL) {
272                 rte_errno = ENOMEM;
273                 goto error;
274         }
275
276         tx_queues_local = rte_zmalloc_socket(name,
277                         sizeof(void *) * nb_tx_queues, 0, numa_node);
278         if (tx_queues_local == NULL) {
279                 rte_errno = ENOMEM;
280                 goto error;
281         }
282
283         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
284         if (internals == NULL) {
285                 rte_errno = ENOMEM;
286                 goto error;
287         }
288
289         /* reserve an ethdev entry */
290         eth_dev = rte_eth_dev_allocate(name);
291         if (eth_dev == NULL) {
292                 rte_errno = ENOSPC;
293                 goto error;
294         }
295
296         /* now put it all together
297          * - store queue data in internals,
298          * - store numa_node info in eth_dev_data
299          * - point eth_dev_data to internals
300          * - and point eth_dev structure to new eth_dev_data structure
301          */
302
303         data = eth_dev->data;
304         data->rx_queues = rx_queues_local;
305         data->tx_queues = tx_queues_local;
306
307         internals->action = action;
308         internals->max_rx_queues = nb_rx_queues;
309         internals->max_tx_queues = nb_tx_queues;
310         for (i = 0; i < nb_rx_queues; i++) {
311                 internals->rx_ring_queues[i].rng = rx_queues[i];
312                 data->rx_queues[i] = &internals->rx_ring_queues[i];
313         }
314         for (i = 0; i < nb_tx_queues; i++) {
315                 internals->tx_ring_queues[i].rng = tx_queues[i];
316                 data->tx_queues[i] = &internals->tx_ring_queues[i];
317         }
318
319         data->dev_private = internals;
320         data->nb_rx_queues = (uint16_t)nb_rx_queues;
321         data->nb_tx_queues = (uint16_t)nb_tx_queues;
322         data->dev_link = pmd_link;
323         data->mac_addrs = &internals->address;
324
325         eth_dev->dev_ops = &ops;
326         data->kdrv = RTE_KDRV_NONE;
327         data->numa_node = numa_node;
328
329         /* finally assign rx and tx ops */
330         eth_dev->rx_pkt_burst = eth_ring_rx;
331         eth_dev->tx_pkt_burst = eth_ring_tx;
332
333         rte_eth_dev_probing_finish(eth_dev);
334         *eth_dev_p = eth_dev;
335
336         return data->port_id;
337
338 error:
339         rte_free(rx_queues_local);
340         rte_free(tx_queues_local);
341         rte_free(internals);
342
343         return -1;
344 }
345
346 int
347 rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
348                 const unsigned nb_rx_queues,
349                 struct rte_ring *const tx_queues[],
350                 const unsigned nb_tx_queues,
351                 const unsigned numa_node)
352 {
353         struct ring_internal_args args = {
354                 .rx_queues = rx_queues,
355                 .nb_rx_queues = nb_rx_queues,
356                 .tx_queues = tx_queues,
357                 .nb_tx_queues = nb_tx_queues,
358                 .numa_node = numa_node,
359                 .addr = &args,
360         };
361         char args_str[32] = { 0 };
362         char ring_name[32] = { 0 };
363         uint16_t port_id = RTE_MAX_ETHPORTS;
364         int ret;
365
366         /* do some parameter checking */
367         if (rx_queues == NULL && nb_rx_queues > 0) {
368                 rte_errno = EINVAL;
369                 return -1;
370         }
371         if (tx_queues == NULL && nb_tx_queues > 0) {
372                 rte_errno = EINVAL;
373                 return -1;
374         }
375         if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
376                 rte_errno = EINVAL;
377                 return -1;
378         }
379
380         snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
381         snprintf(ring_name, 32, "net_ring_%s", name);
382
383         ret = rte_vdev_init(ring_name, args_str);
384         if (ret) {
385                 rte_errno = EINVAL;
386                 return -1;
387         }
388
389         rte_eth_dev_get_port_by_name(ring_name, &port_id);
390
391         return port_id;
392 }
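
/*
 * Minimal usage sketch (illustrative only; the ring and port names are made
 * up and error handling is omitted):
 *
 *   struct rte_ring *r = rte_ring_create("exch0", 1024, rte_socket_id(),
 *                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
 *   int port = rte_eth_from_rings("exch", &r, 1, &r, 1, rte_socket_id());
 *
 * Packets transmitted on the resulting port are simply enqueued to "exch0"
 * and can be read back with rte_eth_rx_burst() on the same port, since the
 * one ring serves as both the RX and the TX queue here.
 */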
393
394 int
395 rte_eth_from_ring(struct rte_ring *r)
396 {
397         return rte_eth_from_rings(r->name, &r, 1, &r, 1,
398                         r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
399 }
400
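/*
 * Create (DEV_CREATE) or look up (DEV_ATTACH) rings named "ETH_RXTX<i>_<name>"
 * and build a port that uses each ring as both an RX and a TX queue.  A second
 * port instantiated with the same <name> and DEV_ATTACH shares those rings, so
 * the two ports can exchange packets.
 */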
401 static int
402 eth_dev_ring_create(const char *name, const unsigned numa_node,
403                 enum dev_action action, struct rte_eth_dev **eth_dev)
404 {
405         /* rx and tx are named from the point of view of the first port.
406          * They are inverted from the point of view of the second port.
407          */
408         struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
409         unsigned i;
410         char rng_name[RTE_RING_NAMESIZE];
411         unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
412                         RTE_PMD_RING_MAX_TX_RINGS);
413
414         for (i = 0; i < num_rings; i++) {
415                 snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
416                 rxtx[i] = (action == DEV_CREATE) ?
417                                 rte_ring_create(rng_name, 1024, numa_node,
418                                                 RING_F_SP_ENQ|RING_F_SC_DEQ) :
419                                 rte_ring_lookup(rng_name);
420                 if (rxtx[i] == NULL)
421                         return -1;
422         }
423
424         if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
425                 numa_node, action, eth_dev) < 0)
426                 return -1;
427
428         return 0;
429 }
430
431 struct node_action_pair {
432         char name[PATH_MAX];
433         unsigned node;
434         enum dev_action action;
435 };
436
437 struct node_action_list {
438         unsigned total;
439         unsigned count;
440         struct node_action_pair *list;
441 };
442
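/*
 * Parse one "nodeaction" value of the form <name>:<node>:<action> (for
 * example, a hypothetical "r0:0:CREATE") into the next free node_action_pair
 * slot.  Returns 0 on success and -EINVAL for any malformed field.
 */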
443 static int parse_kvlist(const char *key __rte_unused, const char *value, void *data)
444 {
445         struct node_action_list *info = data;
446         int ret;
447         char *name;
448         char *action;
449         char *node;
450         char *end;
451
452         name = strdup(value);
453
454         ret = -EINVAL;
455
456         if (!name) {
457                 PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
458                 goto out;
459         }
460
461         node = strchr(name, ':');
462         if (!node) {
463                 PMD_LOG(WARNING, "could not parse node value from %s",
464                         name);
465                 goto out;
466         }
467
468         *node = '\0';
469         node++;
470
471         action = strchr(node, ':');
472         if (!action) {
473                 PMD_LOG(WARNING, "could not parse action value from %s",
474                         node);
475                 goto out;
476         }
477
478         *action = '\0';
479         action++;
480
481         /*
482          * Validate the action and node values parsed out above.
483          */
484
485         if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
486                 info->list[info->count].action = DEV_ATTACH;
487         else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
488                 info->list[info->count].action = DEV_CREATE;
489         else
490                 goto out;
491
492         errno = 0;
493         info->list[info->count].node = strtol(node, &end, 10);
494
495         if ((errno != 0) || (*end != '\0')) {
496                 PMD_LOG(WARNING,
497                         "node value %s is unparseable as a number", node);
498                 goto out;
499         }
500
501         snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
502
503         info->count++;
504
505         ret = 0;
506 out:
507         free(name);
508         return ret;
509 }
510
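/*
 * The "internal" kvarg is not intended for the command line: rte_eth_from_rings()
 * prints the address of a local struct ring_internal_args with "%p" and this
 * callback reads it back with sscanf().  The stored addr field is compared
 * against the decoded pointer as a sanity check before the struct is used.
 */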
511 static int
512 parse_internal_args(const char *key __rte_unused, const char *value,
513                 void *data)
514 {
515         struct ring_internal_args **internal_args = data;
516         void *args;
517
518         sscanf(value, "%p", &args);
519
520         *internal_args = args;
521
522         if ((*internal_args)->addr != args)
523                 return -1;
524
525         return 0;
526 }
527
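/*
 * Probe paths, in order of precedence:
 * - no parameters (or parameters that cannot be parsed): create a port on the
 *   probing lcore's socket with new rings, falling back to attaching to
 *   existing rings of the same name;
 * - the "internal" kvarg: attach to the ring arrays handed over by
 *   rte_eth_from_rings();
 * - one or more "nodeaction" kvargs: create or attach one port per entry.
 */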
528 static int
529 rte_pmd_ring_probe(struct rte_vdev_device *dev)
530 {
531         const char *name, *params;
532         struct rte_kvargs *kvlist = NULL;
533         int ret = 0;
534         struct node_action_list *info = NULL;
535         struct rte_eth_dev *eth_dev = NULL;
536         struct ring_internal_args *internal_args;
537
538         name = rte_vdev_device_name(dev);
539         params = rte_vdev_device_args(dev);
540
541         PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
542
543         if (params == NULL || params[0] == '\0') {
544                 ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
545                                 &eth_dev);
546                 if (ret == -1) {
547                         PMD_LOG(INFO,
548                                 "Attach to pmd_ring for %s", name);
549                         ret = eth_dev_ring_create(name, rte_socket_id(),
550                                                   DEV_ATTACH, &eth_dev);
551                 }
552         } else {
553                 kvlist = rte_kvargs_parse(params, valid_arguments);
554
555                 if (!kvlist) {
556                         PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
557                                         " rings-backed ethernet device");
558                         ret = eth_dev_ring_create(name, rte_socket_id(),
559                                                   DEV_CREATE, &eth_dev);
560                         if (ret == -1) {
561                                 PMD_LOG(INFO,
562                                         "Attach to pmd_ring for %s",
563                                         name);
564                                 ret = eth_dev_ring_create(name, rte_socket_id(),
565                                                           DEV_ATTACH, &eth_dev);
566                         }
567
568                         if (eth_dev)
569                                 eth_dev->device = &dev->device;
570
571                         return ret;
572                 }
573
574                 if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
575                         ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
576                                                  parse_internal_args,
577                                                  &internal_args);
578                         if (ret < 0)
579                                 goto out_free;
580
581                         ret = do_eth_dev_ring_create(name,
582                                 internal_args->rx_queues,
583                                 internal_args->nb_rx_queues,
584                                 internal_args->tx_queues,
585                                 internal_args->nb_tx_queues,
586                                 internal_args->numa_node,
587                                 DEV_ATTACH,
588                                 &eth_dev);
589                         if (ret >= 0)
590                                 ret = 0;
591                 } else {
592                         ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
593                         info = rte_zmalloc("struct node_action_list",
594                                            sizeof(struct node_action_list) +
595                                            (sizeof(struct node_action_pair) * ret),
596                                            0);
597                         if (!info)
598                                 goto out_free;
599
600                         info->total = ret;
601                         info->list = (struct node_action_pair*)(info + 1);
602
603                         ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
604                                                  parse_kvlist, info);
605
606                         if (ret < 0)
607                                 goto out_free;
608
609                         for (info->count = 0; info->count < info->total; info->count++) {
610                                 ret = eth_dev_ring_create(info->list[info->count].name,
611                                                           info->list[info->count].node,
612                                                           info->list[info->count].action,
613                                                           &eth_dev);
614                                 if ((ret == -1) &&
615                                     (info->list[info->count].action == DEV_CREATE)) {
616                                         PMD_LOG(INFO,
617                                                 "Attach to pmd_ring for %s",
618                                                 name);
619                                         ret = eth_dev_ring_create(name,
620                                                         info->list[info->count].node,
621                                                         DEV_ATTACH,
622                                                         &eth_dev);
623                                 }
624                         }
625                 }
626         }
627
628         if (eth_dev)
629                 eth_dev->device = &dev->device;
630
631 out_free:
632         rte_kvargs_free(kvlist);
633         rte_free(info);
634         return ret;
635 }
636
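/*
 * Tear-down: stop the port, free the rings only if this driver created them
 * (rings that were merely attached keep their original owner), then release
 * the queue arrays, the private data and the ethdev entry itself.
 */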
637 static int
638 rte_pmd_ring_remove(struct rte_vdev_device *dev)
639 {
640         const char *name = rte_vdev_device_name(dev);
641         struct rte_eth_dev *eth_dev = NULL;
642         struct pmd_internals *internals = NULL;
643         struct ring_queue *r = NULL;
644         uint16_t i;
645
646         PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);
647
648         if (name == NULL)
649                 return -EINVAL;
650
651         /* find an ethdev entry */
652         eth_dev = rte_eth_dev_allocated(name);
653         if (eth_dev == NULL)
654                 return -ENODEV;
655
656         eth_dev_stop(eth_dev);
657
658         internals = eth_dev->data->dev_private;
659         if (internals->action == DEV_CREATE) {
660                 /*
661                  * it is only necessary to delete the rings in rx_queues because
662                  * they are the same rings used in tx_queues
663                  */
664                 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
665                         r = eth_dev->data->rx_queues[i];
666                         rte_ring_free(r->rng);
667                 }
668         }
669
670         rte_free(eth_dev->data->rx_queues);
671         rte_free(eth_dev->data->tx_queues);
672         rte_free(eth_dev->data->dev_private);
673
674         rte_eth_dev_release_port(eth_dev);
675         return 0;
676 }
677
678 static struct rte_vdev_driver pmd_ring_drv = {
679         .probe = rte_pmd_ring_probe,
680         .remove = rte_pmd_ring_remove,
681 };
682
683 RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
684 RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
685 RTE_PMD_REGISTER_PARAM_STRING(net_ring,
686         ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
687
688 RTE_INIT(eth_ring_init_log)
689 {
690         eth_ring_logtype = rte_log_register("pmd.net.ring");
691         if (eth_ring_logtype >= 0)
692                 rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
693 }