New upstream version 16.11.9
[deb_dpdk.git] / drivers / net / enic / enic_ethdev.c
1 /*
2  * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  *
5  * Copyright (c) 2014, Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  *
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in
17  * the documentation and/or other materials provided with the
18  * distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include <stdio.h>
36 #include <stdint.h>
37
38 #include <rte_dev.h>
39 #include <rte_pci.h>
40 #include <rte_ethdev.h>
41 #include <rte_kvargs.h>
42 #include <rte_string_fns.h>
43
44 #include "vnic_intr.h"
45 #include "vnic_cq.h"
46 #include "vnic_wq.h"
47 #include "vnic_rq.h"
48 #include "vnic_enet.h"
49 #include "enic.h"
50
/* Per-function entry tracing: logs the function name at DEBUG level when the
 * driver is built with RTE_LIBRTE_ENIC_DEBUG; compiles to nothing otherwise.
 */
#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
        RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif
57
/*
 * The set of PCI devices this driver supports:
 * Cisco VIC Ethernet NICs (PF and VF variants).
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        {.vendor_id = 0, /* sentinel */},
};

/* Devarg key selecting the ingress VLAN rewrite mode (see enic_parse_ig_vlan_rewrite). */
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
69
70 static int
71 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
72                         enum rte_filter_op filter_op, void *arg)
73 {
74         struct enic *enic = pmd_priv(eth_dev);
75         int ret = 0;
76
77         ENICPMD_FUNC_TRACE();
78         if (filter_op == RTE_ETH_FILTER_NOP)
79                 return 0;
80
81         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
82                 return -EINVAL;
83
84         switch (filter_op) {
85         case RTE_ETH_FILTER_ADD:
86         case RTE_ETH_FILTER_UPDATE:
87                 ret = enic_fdir_add_fltr(enic,
88                         (struct rte_eth_fdir_filter *)arg);
89                 break;
90
91         case RTE_ETH_FILTER_DELETE:
92                 ret = enic_fdir_del_fltr(enic,
93                         (struct rte_eth_fdir_filter *)arg);
94                 break;
95
96         case RTE_ETH_FILTER_STATS:
97                 enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
98                 break;
99
100         case RTE_ETH_FILTER_FLUSH:
101                 dev_warning(enic, "unsupported operation %u", filter_op);
102                 ret = -ENOTSUP;
103                 break;
104         case RTE_ETH_FILTER_INFO:
105                 enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
106                 break;
107         default:
108                 dev_err(enic, "unknown operation %u", filter_op);
109                 ret = -EINVAL;
110                 break;
111         }
112         return ret;
113 }
114
115 static int
116 enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
117                      enum rte_filter_type filter_type,
118                      enum rte_filter_op filter_op,
119                      void *arg)
120 {
121         int ret = -EINVAL;
122
123         if (RTE_ETH_FILTER_FDIR == filter_type)
124                 ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
125         else
126                 dev_warning(enic, "Filter type (%d) not supported",
127                         filter_type);
128
129         return ret;
130 }
131
/* eth_dev_ops .tx_queue_release: free the WQ backing a Tx queue. */
static void enicpmd_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();
        enic_free_wq(txq);
}
137
138 static int enicpmd_dev_setup_intr(struct enic *enic)
139 {
140         int ret;
141         unsigned int index;
142
143         ENICPMD_FUNC_TRACE();
144
145         /* Are we done with the init of all the queues? */
146         for (index = 0; index < enic->cq_count; index++) {
147                 if (!enic->cq[index].ctrl)
148                         break;
149         }
150         if (enic->cq_count != index)
151                 return 0;
152         for (index = 0; index < enic->wq_count; index++) {
153                 if (!enic->wq[index].ctrl)
154                         break;
155         }
156         if (enic->wq_count != index)
157                 return 0;
158         /* check start of packet (SOP) RQs only in case scatter is disabled. */
159         for (index = 0; index < enic->rq_count; index++) {
160                 if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
161                         break;
162         }
163         if (enic->rq_count != index)
164                 return 0;
165
166         ret = enic_alloc_intr_resources(enic);
167         if (ret) {
168                 dev_err(enic, "alloc intr failed\n");
169                 return ret;
170         }
171         enic_init_vnic_resources(enic);
172
173         ret = enic_setup_finish(enic);
174         if (ret)
175                 dev_err(enic, "setup could not be finished\n");
176
177         return ret;
178 }
179
180 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
181         uint16_t queue_idx,
182         uint16_t nb_desc,
183         unsigned int socket_id,
184         __rte_unused const struct rte_eth_txconf *tx_conf)
185 {
186         int ret;
187         struct enic *enic = pmd_priv(eth_dev);
188
189         ENICPMD_FUNC_TRACE();
190         RTE_ASSERT(queue_idx < enic->conf_wq_count);
191         eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
192
193         ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
194         if (ret) {
195                 dev_err(enic, "error in allocating wq\n");
196                 return ret;
197         }
198
199         return enicpmd_dev_setup_intr(enic);
200 }
201
/* eth_dev_ops .tx_queue_start: enable one Tx queue.  Always returns 0. */
static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_wq(enic, queue_idx);

        return 0;
}
213
/* eth_dev_ops .tx_queue_stop: disable one Tx queue.
 * Returns the enic_stop_wq() result; failure is logged but not retried.
 */
static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_wq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping wq %d\n", queue_idx);

        return ret;
}
228
/* eth_dev_ops .rx_queue_start: enable one Rx queue.  Always returns 0. */
static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_rq(enic, queue_idx);

        return 0;
}
240
/* eth_dev_ops .rx_queue_stop: disable one Rx queue.
 * Returns the enic_stop_rq() result; failure is logged but not retried.
 */
static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_rq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping rq %d\n", queue_idx);

        return ret;
}
255
/* eth_dev_ops .rx_queue_release: free the RQ backing an Rx queue. */
static void enicpmd_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();
        enic_free_rq(rxq);
}
261
262 static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
263                                            uint16_t rx_queue_id)
264 {
265         struct enic *enic = pmd_priv(dev);
266         uint32_t queue_count = 0;
267         struct vnic_cq *cq;
268         uint32_t cq_tail;
269         uint16_t cq_idx;
270         int rq_num;
271
272         if (rx_queue_id >= dev->data->nb_rx_queues) {
273                 dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
274                 return 0;
275         }
276
277         rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
278         cq = &enic->cq[enic_cq_rq(enic, rq_num)];
279         cq_idx = cq->to_clean;
280
281         cq_tail = ioread32(&cq->ctrl->cq_tail);
282
283         if (cq_tail < cq_idx)
284                 cq_tail += cq->ring.desc_count;
285
286         queue_count = cq_tail - cq_idx;
287
288         return queue_count;
289 }
290
291 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
292         uint16_t queue_idx,
293         uint16_t nb_desc,
294         unsigned int socket_id,
295         const struct rte_eth_rxconf *rx_conf,
296         struct rte_mempool *mp)
297 {
298         int ret;
299         struct enic *enic = pmd_priv(eth_dev);
300
301         ENICPMD_FUNC_TRACE();
302
303         RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
304         eth_dev->data->rx_queues[queue_idx] =
305                 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
306
307         ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
308                             rx_conf->rx_free_thresh);
309         if (ret) {
310                 dev_err(enic, "error in allocating rq\n");
311                 return ret;
312         }
313
314         return enicpmd_dev_setup_intr(enic);
315 }
316
317 static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
318         uint16_t vlan_id, int on)
319 {
320         struct enic *enic = pmd_priv(eth_dev);
321         int err;
322
323         ENICPMD_FUNC_TRACE();
324         if (on)
325                 err = enic_add_vlan(enic, vlan_id);
326         else
327                 err = enic_del_vlan(enic, vlan_id);
328         return err;
329 }
330
331 static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
332 {
333         struct enic *enic = pmd_priv(eth_dev);
334
335         ENICPMD_FUNC_TRACE();
336
337         if (mask & ETH_VLAN_STRIP_MASK) {
338                 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
339                         enic->ig_vlan_strip_en = 1;
340                 else
341                         enic->ig_vlan_strip_en = 0;
342         }
343         enic_set_rss_nic_cfg(enic);
344
345
346         if (mask & ETH_VLAN_FILTER_MASK) {
347                 dev_warning(enic,
348                         "Configuration of VLAN filter is not supported\n");
349         }
350
351         if (mask & ETH_VLAN_EXTEND_MASK) {
352                 dev_warning(enic,
353                         "Configuration of extended VLAN is not supported\n");
354         }
355 }
356
/* eth_dev_ops .dev_configure: size vNIC resources from the requested queue
 * counts, optionally enable header/data split, and latch the Rx offload
 * settings (VLAN strip, IP checksum) from dev_conf.
 * Returns 0 on success or the enic_set_vnic_res() error.
 */
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        ret = enic_set_vnic_res(enic);
        if (ret) {
                dev_err(enic, "Set vNIC resource num  failed, aborting\n");
                return ret;
        }

        /* Header split requires both the flag and a nonzero split size. */
        if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
                eth_dev->data->dev_conf.rxmode.header_split) {
                /* Enable header-data-split */
                enic_set_hdr_split_size(enic,
                        eth_dev->data->dev_conf.rxmode.split_hdr_size);
        }

        enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
        enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
        return 0;
}
380
/* Start the device.
 * It returns 0 on success (the enic_enable() result otherwise).
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_enable(enic);
}
391
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Also atomically zeroes the reported link state (dev_link) with a 64-bit
 * compare-and-set, matching how the link-status code updates it elsewhere.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);
        memset(&link, 0, sizeof(link));
        /* Swap in the all-zero link struct; assumes struct rte_eth_link
         * fits in 64 bits (the cmpset is over the whole struct). */
        rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&link);
}
407
/*
 * Stop device and release all driver resources via enic_remove().
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_remove(enic);
}
418
/* eth_dev_ops .link_update: refresh link status from the vNIC.
 * wait_to_complete is ignored — the query is not blocking here.
 */
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
        __rte_unused int wait_to_complete)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_link_update(enic);
}
427
/* eth_dev_ops .stats_get: fill *stats from the vNIC counters. */
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_get(enic, stats);
}
436
/* eth_dev_ops .stats_reset: zero the vNIC counters. */
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(enic);
}
444
/* eth_dev_ops .dev_infos_get: report device capabilities and defaults. */
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
        device_info->max_rx_queues = enic->conf_rq_count / 2;
        device_info->max_tx_queues = enic->conf_wq_count;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        /* +4 on top of MTU and Ethernet header — presumably the FCS;
         * TODO confirm against the hardware frame-length definition. */
        device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
        /* Only a single unicast MAC is supported (see enicpmd_add_mac_addr). */
        device_info->max_mac_addrs = 1;
        device_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        device_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
}
471
/* eth_dev_ops .dev_supported_ptypes_get: packet types the Rx burst function
 * can classify.  Returns NULL if an unexpected rx_pkt_burst is installed.
 */
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        /* RTE_PTYPE_UNKNOWN terminates the list. */
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == enic_recv_pkts)
                return ptypes;
        return NULL;
}
490
/* eth_dev_ops .promiscuous_enable: set the flag and re-apply the packet
 * filter to the NIC. */
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 1;
        enic_add_packet_filter(enic);
}
499
/* eth_dev_ops .promiscuous_disable: clear the flag and re-apply the packet
 * filter to the NIC. */
static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 0;
        enic_add_packet_filter(enic);
}
508
/* eth_dev_ops .allmulticast_enable: set the flag and re-apply the packet
 * filter to the NIC. */
static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 1;
        enic_add_packet_filter(enic);
}
517
/* eth_dev_ops .allmulticast_disable: clear the flag and re-apply the packet
 * filter to the NIC. */
static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 0;
        enic_add_packet_filter(enic);
}
526
/* eth_dev_ops .mac_addr_add: program the unicast MAC address.
 * index and pool are ignored — this driver supports a single MAC address
 * (see max_mac_addrs = 1 in enicpmd_dev_info_get).
 */
static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
        struct ether_addr *mac_addr,
        __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_set_mac_address(enic, mac_addr->addr_bytes);
}
536
/* eth_dev_ops .mac_addr_remove: clear the programmed MAC address.
 * index is ignored — only one MAC is supported. */
static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_del_mac_address(enic);
}
544
/* eth_dev_ops .mtu_set: change the MTU; validation is done in enic_set_mtu(). */
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_set_mtu(enic, mtu);
}
552
/* ethdev callback table for the enic PMD.  NULL entries are operations the
 * driver does not support; the ethdev layer reports them as unsupported. */
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure        = enicpmd_dev_configure,
        .dev_start            = enicpmd_dev_start,
        .dev_stop             = enicpmd_dev_stop,
        .dev_set_link_up      = NULL,
        .dev_set_link_down    = NULL,
        .dev_close            = enicpmd_dev_close,
        .promiscuous_enable   = enicpmd_dev_promiscuous_enable,
        .promiscuous_disable  = enicpmd_dev_promiscuous_disable,
        .allmulticast_enable  = enicpmd_dev_allmulticast_enable,
        .allmulticast_disable = enicpmd_dev_allmulticast_disable,
        .link_update          = enicpmd_dev_link_update,
        .stats_get            = enicpmd_dev_stats_get,
        .stats_reset          = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get        = enicpmd_dev_info_get,
        .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set              = enicpmd_mtu_set,
        .vlan_filter_set      = enicpmd_vlan_filter_set,
        .vlan_tpid_set        = NULL,
        .vlan_offload_set     = enicpmd_vlan_offload_set,
        .vlan_strip_queue_set = NULL,
        .rx_queue_start       = enicpmd_dev_rx_queue_start,
        .rx_queue_stop        = enicpmd_dev_rx_queue_stop,
        .tx_queue_start       = enicpmd_dev_tx_queue_start,
        .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
        .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
        .rx_queue_release     = enicpmd_dev_rx_queue_release,
        .rx_queue_count       = enicpmd_dev_rx_queue_count,
        .rx_descriptor_done   = NULL,
        .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
        .tx_queue_release     = enicpmd_dev_tx_queue_release,
        .dev_led_on           = NULL,
        .dev_led_off          = NULL,
        .flow_ctrl_get        = NULL,
        .flow_ctrl_set        = NULL,
        .priority_flow_ctrl_set = NULL,
        .mac_addr_add         = enicpmd_add_mac_addr,
        .mac_addr_remove      = enicpmd_remove_mac_addr,
        .filter_ctrl          = enicpmd_dev_filter_ctrl,
};
594
595 static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
596                                       const char *value,
597                                       void *opaque)
598 {
599         struct enic *enic;
600
601         enic = (struct enic *)opaque;
602         if (strcmp(value, "trunk") == 0) {
603                 /* Trunk mode: always tag */
604                 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
605         } else if (strcmp(value, "untag") == 0) {
606                 /* Untag default VLAN mode: untag if VLAN = default VLAN */
607                 enic->ig_vlan_rewrite_mode =
608                         IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
609         } else if (strcmp(value, "priority") == 0) {
610                 /*
611                  * Priority-tag default VLAN mode: priority tag (VLAN header
612                  * with ID=0) if VLAN = default
613                  */
614                 enic->ig_vlan_rewrite_mode =
615                         IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
616         } else if (strcmp(value, "pass") == 0) {
617                 /* Pass through mode: do not touch tags */
618                 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
619         } else {
620                 dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
621                         ": expected=trunk|untag|priority|pass given=%s\n",
622                         value);
623                 return -EINVAL;
624         }
625         return 0;
626 }
627
628 static int enic_check_devargs(struct rte_eth_dev *dev)
629 {
630         static const char *valid_keys[] = {
631                 ENIC_DEVARG_IG_VLAN_REWRITE,
632                 NULL};
633         struct enic *enic = pmd_priv(dev);
634         struct rte_kvargs *kvlist;
635
636         ENICPMD_FUNC_TRACE();
637
638         enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
639         if (!dev->pci_dev->device.devargs)
640                 return 0;
641         kvlist = rte_kvargs_parse(dev->pci_dev->device.devargs->args, valid_keys);
642         if (!kvlist)
643                 return -EINVAL;
644         if (rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
645                                enic_parse_ig_vlan_rewrite, enic) < 0) {
646                 rte_kvargs_free(kvlist);
647                 return -EINVAL;
648         }
649         rte_kvargs_free(kvlist);
650         return 0;
651 }
652
/* Initialize the driver
 * It returns 0 on success.
 * Wires up the ops table and burst functions, records the PCI identity
 * (including a printable BDF string for logging), parses devargs, and
 * probes the vNIC hardware.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
        struct enic *enic = pmd_priv(eth_dev);
        int err;

        ENICPMD_FUNC_TRACE();

        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
        eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_xmit_pkts;

        pdev = eth_dev->pci_dev;
        rte_eth_copy_pci_info(eth_dev, pdev);
        enic->pdev = pdev;
        addr = &pdev->addr;

        /* Printable "domain:bus:dev.func" string used in log messages. */
        snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                addr->domain, addr->bus, addr->devid, addr->function);

        /* Devargs must be parsed before probe so the rewrite mode is set. */
        err = enic_check_devargs(eth_dev);
        if (err)
                return err;
        return enic_probe(enic);
}
684
/* PMD driver descriptor: PCI id table, required BAR mapping, link-status
 * interrupt support, and the per-port init hook above. */
static struct eth_driver rte_enic_pmd = {
        .pci_drv = {
                .id_table = pci_id_enic_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_enicpmd_dev_init,
        .dev_private_size = sizeof(struct enic),
};
695
/* Register the PMD with the EAL, export its PCI id table, and advertise the
 * supported devargs string for tooling. */
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
        ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");