/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR    1
#define DRV_MODULE_VER_MINOR    1
#define DRV_MODULE_VER_SUBMINOR 1

#define ENA_IO_TXQ_IDX(q)       (2 * (q))
#define ENA_IO_RXQ_IDX(q)       (2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)   (((q) - 1) / 2)
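
/*
 * Each queue pair is interleaved on the device side: pair q uses IO TX
 * queue 2q and IO RX queue 2q + 1, e.g. pair 0 -> TXQ 0 / RXQ 1, and
 * ENA_IO_RXQ_IDX_REV(1) == 0 recovers the pair index.
 */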

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size) ((ring_size) / 8)

#define __MERGE_64B_H_L(h, l) (((uint64_t)(h) << 32) | (l))
#define TEST_BIT(val, bit_shift) ((val) & (1UL << (bit_shift)))
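
/*
 * Example: __MERGE_64B_H_L(0x1, 0x2) == 0x100000002; used in
 * ena_stats_get() below to glue the 32-bit high/low halves of the
 * device's basic stats counters.
 */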

#define GET_L4_HDR_LEN(mbuf)                                    \
        ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *,       \
                mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
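
/*
 * The upper four bits of tcp_hdr->data_off hold the TCP data offset in
 * 32-bit words, so the macro above yields the L4 header length in that
 * unit.
 */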

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE   (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE       40
#define ENA_ETH_SS_STATS        0xFF
#define ETH_GSTRING_LEN 32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MAX_RING_DESC       ENA_DEFAULT_RING_SIZE
#define ENA_MIN_RING_DESC       128

enum ethtool_stringset {
        ETH_SS_TEST             = 0,
        ETH_SS_STATS,
};

struct ena_stats {
        char name[ETH_GSTRING_LEN];
        int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, dev)
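/*
 * For instance, ENA_STAT_RX_ENTRY(cnt) expands to
 * { .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) },
 * so each table below pairs a printable name with the field's offset.
 */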

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
 */
uint32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
        ENA_STAT_GLOBAL_ENTRY(tx_timeout),
        ENA_STAT_GLOBAL_ENTRY(io_suspend),
        ENA_STAT_GLOBAL_ENTRY(io_resume),
        ENA_STAT_GLOBAL_ENTRY(wd_expired),
        ENA_STAT_GLOBAL_ENTRY(interface_up),
        ENA_STAT_GLOBAL_ENTRY(interface_down),
        ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
        ENA_STAT_TX_ENTRY(queue_stop),
        ENA_STAT_TX_ENTRY(queue_wakeup),
        ENA_STAT_TX_ENTRY(dma_mapping_err),
        ENA_STAT_TX_ENTRY(linearize),
        ENA_STAT_TX_ENTRY(linearize_failed),
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(prepare_ctx_err),
        ENA_STAT_TX_ENTRY(missing_tx_comp),
        ENA_STAT_TX_ENTRY(bad_req_id),
};

static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(cnt),
        ENA_STAT_RX_ENTRY(bytes),
        ENA_STAT_RX_ENTRY(refil_partial),
        ENA_STAT_RX_ENTRY(bad_csum),
        ENA_STAT_RX_ENTRY(page_alloc_fail),
        ENA_STAT_RX_ENTRY(skb_alloc_fail),
        ENA_STAT_RX_ENTRY(dma_mapping_err),
        ENA_STAT_RX_ENTRY(bad_desc_num),
        ENA_STAT_RX_ENTRY(small_copy_len_pkt),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
        ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
        ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
        ENA_STAT_ENA_COM_ENTRY(completed_cmd),
        ENA_STAT_ENA_COM_ENTRY(out_of_space),
        ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL  ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX      ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX      ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
                        DEV_TX_OFFLOAD_UDP_CKSUM |\
                        DEV_TX_OFFLOAD_IPV4_CKSUM |\
                        DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
                       PKT_TX_IP_CKSUM |\
                       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF    0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF        0xEC21

#define ENA_TX_OFFLOAD_MASK     (\
        PKT_TX_L4_MASK |         \
        PKT_TX_IP_CKSUM |        \
        PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK      \
        (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
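
/*
 * XOR with the full PKT_TX_OFFLOAD_MASK leaves exactly the TX offload
 * flags this driver does not handle, so unsupported requests can be
 * rejected with a single mask test.
 */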

int ena_logtype_init;
int ena_logtype_driver;

static const struct rte_pci_id pci_id_ena_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
        { .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
                           struct ena_com_dev_get_features_ctx *get_feat_ctx,
                           bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                              uint16_t nb_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                              uint16_t nb_desc, unsigned int socket_id,
                              const struct rte_eth_rxconf *rx_conf,
                              struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
                                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
                           int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_free_io_queues_all(struct ena_adapter *adapter);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
                                 enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(struct rte_eth_dev *dev,
                          struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);

static const struct eth_dev_ops ena_dev_ops = {
        .dev_configure        = ena_dev_configure,
        .dev_infos_get        = ena_infos_get,
        .rx_queue_setup       = ena_rx_queue_setup,
        .tx_queue_setup       = ena_tx_queue_setup,
        .dev_start            = ena_start,
        .dev_stop             = ena_stop,
        .link_update          = ena_link_update,
        .stats_get            = ena_stats_get,
        .mtu_set              = ena_mtu_set,
        .rx_queue_release     = ena_rx_queue_release,
        .tx_queue_release     = ena_tx_queue_release,
        .dev_close            = ena_close,
        .dev_reset            = ena_dev_reset,
        .reta_update          = ena_rss_reta_update,
        .reta_query           = ena_rss_reta_query,
};

#define NUMA_NO_NODE    SOCKET_ID_ANY

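/*
 * Best-effort NUMA hint: reuse the socket id of the memzone stored at
 * index 'cpu' in the EAL memzone array, falling back to NUMA_NO_NODE
 * when the index is out of range.
 */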
static inline int ena_cpu_to_node(int cpu)
{
        struct rte_config *config = rte_eal_get_configuration();
        struct rte_fbarray *arr = &config->mem_config->memzones;
        const struct rte_memzone *mz;

        if (unlikely(cpu >= RTE_MAX_MEMZONE))
                return NUMA_NO_NODE;

        mz = rte_fbarray_get(arr, cpu);

        return mz->socket_id;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
                                       struct ena_com_rx_ctx *ena_rx_ctx)
{
        uint64_t ol_flags = 0;
        uint32_t packet_type = 0;

        if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
                packet_type |= RTE_PTYPE_L4_TCP;
        else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
                packet_type |= RTE_PTYPE_L4_UDP;

        if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
                packet_type |= RTE_PTYPE_L3_IPV4;
        else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
                packet_type |= RTE_PTYPE_L3_IPV6;

        if (unlikely(ena_rx_ctx->l4_csum_err))
                ol_flags |= PKT_RX_L4_CKSUM_BAD;
        if (unlikely(ena_rx_ctx->l3_csum_err))
                ol_flags |= PKT_RX_IP_CKSUM_BAD;

        mbuf->ol_flags = ol_flags;
        mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
                                       struct ena_com_tx_ctx *ena_tx_ctx,
                                       uint64_t queue_offloads)
{
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

        if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
            (queue_offloads & QUEUE_OFFLOADS)) {
                /* check if TSO is required */
                if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
                    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
                        ena_tx_ctx->tso_enable = true;

                        ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
                }

                /* check if L3 checksum is needed */
                if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
                    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
                        ena_tx_ctx->l3_csum_enable = true;

                if (mbuf->ol_flags & PKT_TX_IPV6) {
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
                } else {
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

                        /* set don't fragment (DF) flag */
                        if (mbuf->packet_type &
                                (RTE_PTYPE_L4_NONFRAG
                                 | RTE_PTYPE_INNER_L4_NONFRAG))
                                ena_tx_ctx->df = true;
                }

                /* check if L4 checksum is needed */
                if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
                    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
                        ena_tx_ctx->l4_csum_enable = true;
                } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
                           (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
                        ena_tx_ctx->l4_csum_enable = true;
                } else {
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
                        ena_tx_ctx->l4_csum_enable = false;
                }

                ena_meta->mss = mbuf->tso_segsz;
                ena_meta->l3_hdr_len = mbuf->l3_len;
                ena_meta->l3_hdr_offset = mbuf->l2_len;

                ena_tx_ctx->meta_valid = true;
        } else {
                ena_tx_ctx->meta_valid = false;
        }
}

static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
        if (likely(req_id < rx_ring->ring_size))
                return 0;

        RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);

        rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
        rx_ring->adapter->trigger_reset = true;

        return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
        struct ena_tx_buffer *tx_info = NULL;

        if (likely(req_id < tx_ring->ring_size)) {
                tx_info = &tx_ring->tx_buffer_info[req_id];
                if (likely(tx_info->mbuf))
                        return 0;
        }

        if (tx_info)
                RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
        else
                RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);

        /* Trigger device reset */
        tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
        tx_ring->adapter->trigger_reset = true;
        return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_admin_host_info *host_info;
        int rc;

        /* Allocate only the host info */
        rc = ena_com_allocate_host_info(ena_dev);
        if (rc) {
                RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
                return;
        }

        host_info = ena_dev->host_attr.host_info;

        host_info->os_type = ENA_ADMIN_OS_DPDK;
        host_info->kernel_ver = RTE_VERSION;
        snprintf((char *)host_info->kernel_ver_str,
                 sizeof(host_info->kernel_ver_str),
                 "%s", rte_version());
        host_info->os_dist = RTE_VERSION;
        snprintf((char *)host_info->os_dist_str,
                 sizeof(host_info->os_dist_str),
                 "%s", rte_version());
        host_info->driver_version =
                (DRV_MODULE_VER_MAJOR) |
                (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
                (DRV_MODULE_VER_SUBMINOR <<
                        ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

        rc = ena_com_set_host_attributes(ena_dev);
        if (rc) {
                if (rc == -ENA_COM_UNSUPPORTED)
                        RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
                else
                        RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

                goto err;
        }

        return;

err:
        ena_com_delete_host_info(ena_dev);
}

static int
ena_get_sset_count(struct rte_eth_dev *dev, int sset)
{
        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;

        /* Workaround for clang:
         * touch internal structures to prevent
         * compiler error
         */
        ENA_TOUCH(ena_stats_global_strings);
        ENA_TOUCH(ena_stats_tx_strings);
        ENA_TOUCH(ena_stats_rx_strings);
        ENA_TOUCH(ena_stats_ena_com_strings);

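        /*
         * Per-queue TX and RX stats plus the global and admin (ena_com)
         * stats, e.g. with 4 queue pairs: 4 * (12 + 9) + 7 + 5 = 96.
         */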
        return dev->data->nb_tx_queues *
                (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
                ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
        u32 debug_area_size;
        int rc, ss_count;

        ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
        if (ss_count <= 0) {
                RTE_LOG(ERR, PMD, "SS count is negative\n");
                return;
        }

        /* allocate 32 bytes for each string and 64 bits for the value */
        debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

        rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
        if (rc) {
                RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
                return;
        }

        rc = ena_com_set_host_attributes(&adapter->ena_dev);
        if (rc) {
                if (rc == -ENA_COM_UNSUPPORTED)
                        RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
                else
                        RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

                goto err;
        }

        return;
err:
        ena_com_delete_debug_area(&adapter->ena_dev);
}

static void ena_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);

        if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
                ena_stop(dev);
        adapter->state = ENA_ADAPTER_STATE_CLOSED;

        ena_rx_queue_release_all(dev);
        ena_tx_queue_release_all(dev);

        rte_free(adapter->drv_stats);
        adapter->drv_stats = NULL;

        rte_intr_disable(intr_handle);
        rte_intr_callback_unregister(intr_handle,
                                     ena_interrupt_handler_rte,
                                     adapter);

        /*
         * MAC is not allocated dynamically. Setting NULL should prevent from
         * release of the resource in the rte_eth_dev_release_port().
         */
        dev->data->mac_addrs = NULL;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
        struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
        struct rte_eth_dev *eth_dev;
        struct rte_pci_device *pci_dev;
        struct rte_intr_handle *intr_handle;
        struct ena_com_dev *ena_dev;
        struct ena_com_dev_get_features_ctx get_feat_ctx;
        struct ena_adapter *adapter;
        int nb_queues;
        int rc, i;
        bool wd_state;

        adapter = (struct ena_adapter *)(dev->data->dev_private);
        ena_dev = &adapter->ena_dev;
        eth_dev = adapter->rte_dev;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        intr_handle = &pci_dev->intr_handle;
        nb_queues = eth_dev->data->nb_rx_queues;

        ena_com_set_admin_running_state(ena_dev, false);

        rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
        if (rc)
                RTE_LOG(ERR, PMD, "Device reset failed\n");

        for (i = 0; i < nb_queues; i++)
                mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;

        ena_rx_queue_release_all(eth_dev);
        ena_tx_queue_release_all(eth_dev);

        rte_intr_disable(intr_handle);

        ena_com_abort_admin_commands(ena_dev);
        ena_com_wait_for_abort_completion(ena_dev);
        ena_com_admin_destroy(ena_dev);
        ena_com_mmio_reg_read_request_destroy(ena_dev);

        rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
        if (rc) {
                PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
                return rc;
        }
        adapter->wd_state = wd_state;

        rte_intr_enable(intr_handle);
        ena_com_set_admin_polling_mode(ena_dev, false);
        ena_com_admin_aenq_enable(ena_dev);

        for (i = 0; i < nb_queues; ++i)
                ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
                        mb_pool_rx[i]);

        for (i = 0; i < nb_queues; ++i)
                ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);

        adapter->trigger_reset = false;

        return 0;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        int rc, i;
        u16 entry_value;
        int conf_idx;
        int idx;

        if ((reta_size == 0) || (reta_conf == NULL))
                return -EINVAL;

        if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
                RTE_LOG(WARNING, PMD,
                        "indirection table %d is bigger than supported (%d)\n",
                        reta_size, ENA_RX_RSS_TABLE_SIZE);
                return -EINVAL;
        }

        for (i = 0 ; i < reta_size ; i++) {
                /* Each reta_conf holds 64 entries; to support the full
                 * 128-entry table, two configurations of 64 are used.
                 */
                conf_idx = i / RTE_RETA_GROUP_SIZE;
                idx = i % RTE_RETA_GROUP_SIZE;
                if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
                        entry_value =
                                ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

                        rc = ena_com_indirect_table_fill_entry(ena_dev,
                                                               i,
                                                               entry_value);
                        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                                RTE_LOG(ERR, PMD,
                                        "Cannot fill indirect table\n");
                                return rc;
                        }
                }
        }

        rc = ena_com_indirect_table_set(ena_dev);
        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
                return rc;
        }

        RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
                __func__, reta_size, adapter->rte_dev->data->port_id);

        return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        int rc;
        int i;
        u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
        int reta_conf_idx;
        int reta_idx;

        if (reta_size == 0 || reta_conf == NULL ||
            (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
                return -EINVAL;

        rc = ena_com_indirect_table_get(ena_dev, indirect_table);
        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                RTE_LOG(ERR, PMD, "cannot get indirect table\n");
                return -ENOTSUP;
        }

        for (i = 0 ; i < reta_size ; i++) {
                reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
                reta_idx = i % RTE_RETA_GROUP_SIZE;
                if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
                        reta_conf[reta_conf_idx].reta[reta_idx] =
                                ENA_IO_RXQ_IDX_REV(indirect_table[i]);
        }

        return 0;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
        int rc, i;
        u32 val;

        rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
        if (unlikely(rc)) {
                RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
                goto err_rss_init;
        }

        for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
                val = i % nb_rx_queues;
                rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                                       ENA_IO_RXQ_IDX(val));
                if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
                        RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
                        goto err_fill_indir;
                }
        }

        rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
                                        ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
        if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
                RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
                goto err_fill_indir;
        }

        rc = ena_com_set_default_hash_ctrl(ena_dev);
        if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
                RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
                goto err_fill_indir;
        }

        rc = ena_com_indirect_table_set(ena_dev);
        if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
                RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
                goto err_fill_indir;
        }
        RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
                adapter->rte_dev->data->port_id);

        return 0;

err_fill_indir:
        ena_com_rss_destroy(ena_dev);
err_rss_init:

        return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
        struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
        int nb_queues = dev->data->nb_rx_queues;
        int i;

        for (i = 0; i < nb_queues; i++)
                ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
        struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
        int nb_queues = dev->data->nb_tx_queues;
        int i;

        for (i = 0; i < nb_queues; i++)
                ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
        struct ena_ring *ring = (struct ena_ring *)queue;

        ena_assert_msg(ring->configured,
                       "API violation - releasing not configured queue");
        ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
                       "API violation");

        /* Free ring resources */
        if (ring->rx_buffer_info)
                rte_free(ring->rx_buffer_info);
        ring->rx_buffer_info = NULL;

        if (ring->empty_rx_reqs)
                rte_free(ring->empty_rx_reqs);
        ring->empty_rx_reqs = NULL;

        ring->configured = 0;

        RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
                ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
        struct ena_ring *ring = (struct ena_ring *)queue;

        ena_assert_msg(ring->configured,
                       "API violation. Releasing not configured queue");
        ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
                       "API violation");

        /* Free all bufs */
        ena_tx_queue_release_bufs(ring);

        /* Free ring resources */
        if (ring->tx_buffer_info)
                rte_free(ring->tx_buffer_info);

        if (ring->empty_tx_reqs)
                rte_free(ring->empty_tx_reqs);

        ring->empty_tx_reqs = NULL;
        ring->tx_buffer_info = NULL;

        ring->configured = 0;

        RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
                ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
        unsigned int ring_mask = ring->ring_size - 1;

        while (ring->next_to_clean != ring->next_to_use) {
                struct rte_mbuf *m =
                        ring->rx_buffer_info[ring->next_to_clean & ring_mask];

                if (m)
                        rte_mbuf_raw_free(m);

                ring->next_to_clean++;
        }
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
        unsigned int i;

        for (i = 0; i < ring->ring_size; ++i) {
                struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

                if (tx_buf->mbuf)
                        rte_pktmbuf_free(tx_buf->mbuf);

                ring->next_to_clean++;
        }
}

static int ena_link_update(struct rte_eth_dev *dev,
                           __rte_unused int wait_to_complete)
{
        struct rte_eth_link *link = &dev->data->dev_link;
        struct ena_adapter *adapter;

        adapter = (struct ena_adapter *)(dev->data->dev_private);

        link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
        link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = ETH_LINK_FULL_DUPLEX;

        return 0;
}

static int ena_queue_restart_all(struct rte_eth_dev *dev,
                                 enum ena_ring_type ring_type)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_ring *queues = NULL;
        int nb_queues;
        int i = 0;
        int rc = 0;

        if (ring_type == ENA_RING_TYPE_RX) {
                queues = adapter->rx_ring;
                nb_queues = dev->data->nb_rx_queues;
        } else {
                queues = adapter->tx_ring;
                nb_queues = dev->data->nb_tx_queues;
        }
        for (i = 0; i < nb_queues; i++) {
                if (queues[i].configured) {
                        if (ring_type == ENA_RING_TYPE_RX) {
                                ena_assert_msg(
                                        dev->data->rx_queues[i] == &queues[i],
                                        "Inconsistent state of rx queues\n");
                        } else {
                                ena_assert_msg(
                                        dev->data->tx_queues[i] == &queues[i],
                                        "Inconsistent state of tx queues\n");
                        }

                        rc = ena_queue_restart(&queues[i]);

                        if (rc) {
                                PMD_INIT_LOG(ERR,
                                             "failed to restart queue %d type(%d)",
                                             i, ring_type);
                                return rc;
                        }
                }
        }

        return 0;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
        uint32_t max_frame_len = adapter->max_mtu;

        if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_JUMBO_FRAME)
                max_frame_len =
                        adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

        return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
        uint32_t max_frame_len = ena_get_mtu_conf(adapter);

        if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
                PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
                                  "max mtu: %d, min mtu: %d\n",
                             max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
                return ENA_COM_UNSUPPORTED;
        }

        return 0;
}

static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
                    u16 *max_tx_sgl_size,
                    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        uint32_t queue_size = ENA_DEFAULT_RING_SIZE;

        queue_size = RTE_MIN(queue_size,
                             get_feat_ctx->max_queues.max_cq_depth);
        queue_size = RTE_MIN(queue_size,
                             get_feat_ctx->max_queues.max_sq_depth);

        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                queue_size = RTE_MIN(queue_size,
                                     get_feat_ctx->max_queues.max_llq_depth);

        /* Round down to power of 2 */
        if (!rte_is_power_of_2(queue_size))
                queue_size = rte_align32pow2(queue_size >> 1);
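        /*
         * Note: rte_align32pow2() rounds up, so halving first yields a
         * power of two no larger than the original size, e.g. 600 -> 512.
         */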

        if (unlikely(queue_size == 0)) {
                PMD_INIT_LOG(ERR, "Invalid queue size");
                return -EFAULT;
        }

        *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
                get_feat_ctx->max_queues.max_packet_tx_descs);

        return queue_size;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);

        rte_atomic64_init(&adapter->drv_stats->ierrors);
        rte_atomic64_init(&adapter->drv_stats->oerrors);
        rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}

static int ena_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats)
{
        struct ena_admin_basic_stats ena_stats;
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        int rc;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -ENOTSUP;

        memset(&ena_stats, 0, sizeof(ena_stats));
        rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
        if (unlikely(rc)) {
                RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n");
                return rc;
        }

        /* Set of basic statistics from ENA */
        stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
                                          ena_stats.rx_pkts_low);
        stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
                                          ena_stats.tx_pkts_low);
        stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
                                        ena_stats.rx_bytes_low);
        stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
                                        ena_stats.tx_bytes_low);
        stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
                                         ena_stats.rx_drops_low);

        /* Driver related stats */
        stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
        stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
        stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
        return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev;
        int rc = 0;

        ena_assert_msg(dev->data != NULL, "Uninitialized device");
        ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
        adapter = (struct ena_adapter *)(dev->data->dev_private);

        ena_dev = &adapter->ena_dev;
        ena_assert_msg(ena_dev != NULL, "Uninitialized device");

        if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
                RTE_LOG(ERR, PMD,
                        "Invalid MTU setting. new_mtu: %d "
                        "max mtu: %d min mtu: %d\n",
                        mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
                return -EINVAL;
        }

        rc = ena_com_set_dev_mtu(ena_dev, mtu);
        if (rc)
                RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
        else
                RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);

        return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        uint64_t ticks;
        int rc = 0;

        rc = ena_check_valid_conf(adapter);
        if (rc)
                return rc;

        rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
        if (rc)
                return rc;

        rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
        if (rc)
                return rc;

        if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
            ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
                rc = ena_rss_init_default(adapter);
                if (rc)
                        return rc;
        }

        ena_stats_restart(dev);

        adapter->timestamp_wd = rte_get_timer_cycles();
        adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

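        /* Arm the watchdog as a periodic 1 Hz timer on the current lcore. */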
        ticks = rte_get_timer_hz();
        rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
                        ena_timer_wd_callback, adapter);

        adapter->state = ENA_ADAPTER_STATE_RUNNING;

        return 0;
}

static void ena_stop(struct rte_eth_dev *dev)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);

        rte_timer_stop_sync(&adapter->timer_wd);
        ena_free_io_queues_all(adapter);

        adapter->state = ENA_ADAPTER_STATE_STOPPED;
}

static int ena_create_io_queue(struct ena_ring *ring)
{
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev;
        struct ena_com_create_io_ctx ctx =
                /* policy set to _HOST just to satisfy icc compiler */
                { ENA_ADMIN_PLACEMENT_POLICY_HOST,
                  0, 0, 0, 0, 0 };
        uint16_t ena_qid;
        int rc;

        adapter = ring->adapter;
        ena_dev = &adapter->ena_dev;

        if (ring->type == ENA_RING_TYPE_TX) {
                ena_qid = ENA_IO_TXQ_IDX(ring->id);
                ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
                ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
                ctx.queue_size = adapter->tx_ring_size;
        } else {
                ena_qid = ENA_IO_RXQ_IDX(ring->id);
                ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
                ctx.queue_size = adapter->rx_ring_size;
        }
        ctx.qid = ena_qid;
        ctx.msix_vector = -1; /* interrupts not used */
        ctx.numa_node = ena_cpu_to_node(ring->id);

        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "failed to create io queue #%d (qid:%d) rc: %d\n",
                        ring->id, ena_qid, rc);
                return rc;
        }

        rc = ena_com_get_io_handlers(ena_dev, ena_qid,
                                     &ring->ena_com_io_sq,
                                     &ring->ena_com_io_cq);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Failed to get io queue handlers. queue num %d rc: %d\n",
                        ring->id, rc);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
                return rc;
        }

        if (ring->type == ENA_RING_TYPE_TX)
                ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);

        return 0;
}

static void ena_free_io_queues_all(struct ena_adapter *adapter)
{
        struct rte_eth_dev *eth_dev = adapter->rte_dev;
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        int i;
        uint16_t ena_qid;
        uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
        uint16_t nb_txq = eth_dev->data->nb_tx_queues;

        for (i = 0; i < nb_txq; ++i) {
                ena_qid = ENA_IO_TXQ_IDX(i);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
        }

        for (i = 0; i < nb_rxq; ++i) {
                ena_qid = ENA_IO_RXQ_IDX(i);
                ena_com_destroy_io_queue(ena_dev, ena_qid);

                ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
        }
}

static int ena_queue_restart(struct ena_ring *ring)
{
        int rc, bufs_num;

        ena_assert_msg(ring->configured == 1,
                       "Trying to restart unconfigured queue\n");

        rc = ena_create_io_queue(ring);
        if (rc) {
                PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
                return rc;
        }

        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        if (ring->type == ENA_RING_TYPE_TX)
                return 0;

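        /*
         * Populate all but one descriptor: one slot stays unused so that a
         * full ring (next_to_use catching up with next_to_clean) remains
         * distinguishable from an empty one.
         */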
        bufs_num = ring->ring_size - 1;
        rc = ena_populate_rx_queue(ring, bufs_num);
        if (rc != bufs_num) {
                PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
                return ENA_COM_FAULT;
        }

        return 0;
}

static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                              uint16_t queue_idx,
                              uint16_t nb_desc,
                              __rte_unused unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf)
{
        struct ena_ring *txq = NULL;
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        unsigned int i;

        txq = &adapter->tx_ring[queue_idx];

        if (txq->configured) {
                RTE_LOG(CRIT, PMD,
                        "API violation. Queue %d is already configured\n",
                        queue_idx);
                return ENA_COM_FAULT;
        }

        if (!rte_is_power_of_2(nb_desc)) {
                RTE_LOG(ERR, PMD,
                        "Unsupported size of TX queue: %d is not a power of 2.\n",
                        nb_desc);
                return -EINVAL;
        }

        if (nb_desc > adapter->tx_ring_size) {
                RTE_LOG(ERR, PMD,
                        "Unsupported size of TX queue (max size: %d)\n",
                        adapter->tx_ring_size);
                return -EINVAL;
        }

        txq->port_id = dev->data->port_id;
        txq->next_to_clean = 0;
        txq->next_to_use = 0;
        txq->ring_size = nb_desc;

        txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
                                          sizeof(struct ena_tx_buffer) *
                                          txq->ring_size,
                                          RTE_CACHE_LINE_SIZE);
        if (!txq->tx_buffer_info) {
                RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
                return -ENOMEM;
        }

        txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
                                         sizeof(u16) * txq->ring_size,
                                         RTE_CACHE_LINE_SIZE);
        if (!txq->empty_tx_reqs) {
                RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
                rte_free(txq->tx_buffer_info);
                return -ENOMEM;
        }

        for (i = 0; i < txq->ring_size; i++)
                txq->empty_tx_reqs[i] = i;

        if (tx_conf != NULL) {
                txq->offloads =
                        tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
        }

        /* Store pointer to this queue in upper layer */
        txq->configured = 1;
        dev->data->tx_queues[queue_idx] = txq;

        return 0;
}

static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                              uint16_t queue_idx,
                              uint16_t nb_desc,
                              __rte_unused unsigned int socket_id,
                              __rte_unused const struct rte_eth_rxconf *rx_conf,
                              struct rte_mempool *mp)
{
        struct ena_adapter *adapter =
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_ring *rxq = NULL;
        int i;

        rxq = &adapter->rx_ring[queue_idx];
        if (rxq->configured) {
                RTE_LOG(CRIT, PMD,
                        "API violation. Queue %d is already configured\n",
                        queue_idx);
                return ENA_COM_FAULT;
        }

        if (!rte_is_power_of_2(nb_desc)) {
                RTE_LOG(ERR, PMD,
                        "Unsupported size of RX queue: %d is not a power of 2.\n",
                        nb_desc);
                return -EINVAL;
        }

        if (nb_desc > adapter->rx_ring_size) {
                RTE_LOG(ERR, PMD,
                        "Unsupported size of RX queue (max size: %d)\n",
                        adapter->rx_ring_size);
                return -EINVAL;
        }

        rxq->port_id = dev->data->port_id;
        rxq->next_to_clean = 0;
        rxq->next_to_use = 0;
        rxq->ring_size = nb_desc;
        rxq->mb_pool = mp;

        rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
                                          sizeof(struct rte_mbuf *) * nb_desc,
                                          RTE_CACHE_LINE_SIZE);
        if (!rxq->rx_buffer_info) {
                RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
                return -ENOMEM;
        }

        rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
                                         sizeof(uint16_t) * nb_desc,
                                         RTE_CACHE_LINE_SIZE);
        if (!rxq->empty_rx_reqs) {
                RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
                rte_free(rxq->rx_buffer_info);
                rxq->rx_buffer_info = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < nb_desc; i++)
                rxq->empty_rx_reqs[i] = i;

        /* Store pointer to this queue in upper layer */
        rxq->configured = 1;
        dev->data->rx_queues[queue_idx] = rxq;

        return 0;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
        unsigned int i;
        int rc;
        uint16_t ring_size = rxq->ring_size;
        uint16_t ring_mask = ring_size - 1;
        uint16_t next_to_use = rxq->next_to_use;
        uint16_t in_use, req_id;
        struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];

        if (unlikely(!count))
                return 0;

        in_use = rxq->next_to_use - rxq->next_to_clean;
        ena_assert_msg(((in_use + count) < ring_size), "bad ring state");

        count = RTE_MIN(count,
                        (uint16_t)(ring_size - (next_to_use & ring_mask)));

        /* get resources for incoming packets */
        rc = rte_mempool_get_bulk(rxq->mb_pool,
                                  (void **)(&mbufs[next_to_use & ring_mask]),
                                  count);
        if (unlikely(rc < 0)) {
                rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
                PMD_RX_LOG(DEBUG, "there are not enough free buffers");
                return 0;
        }

        for (i = 0; i < count; i++) {
                uint16_t next_to_use_masked = next_to_use & ring_mask;
                struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
                struct ena_com_buf ebuf;

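                /* Prefetch a few slots ahead to hide the mbuf header
                 * cache miss on upcoming iterations.
                 */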
                rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);

                req_id = rxq->empty_rx_reqs[next_to_use_masked];
                rc = validate_rx_req_id(rxq, req_id);
                if (unlikely(rc < 0))
                        break;

                /* prepare physical address for DMA transaction */
                ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
                ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
                /* pass resource to device */
                rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
                                                &ebuf, req_id);
                if (unlikely(rc)) {
                        /* Return the mbufs that were not handed to the
                         * device back to the pool, starting at the current
                         * (masked) position.
                         */
                        rte_mempool_put_bulk(rxq->mb_pool,
                                             (void **)&mbufs[next_to_use_masked],
                                             count - i);
                        RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
                        break;
                }
                next_to_use++;
        }

        if (unlikely(i < count))
                RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
                        "buffers (from %d)\n", rxq->id, i, count);

        /* When we submitted free resources to device... */
        if (likely(i > 0)) {
                /* ...let HW know that it can fill buffers with data
                 *
                 * Add memory barrier to make sure the desc were written before
                 * issue a doorbell
                 */
                rte_wmb();
                ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

                rxq->next_to_use = next_to_use;
        }

        return i;
}
1409
1410 static int ena_device_init(struct ena_com_dev *ena_dev,
1411                            struct ena_com_dev_get_features_ctx *get_feat_ctx,
1412                            bool *wd_state)
1413 {
1414         uint32_t aenq_groups;
1415         int rc;
1416         bool readless_supported;
1417
1418         /* Initialize mmio registers */
1419         rc = ena_com_mmio_reg_read_request_init(ena_dev);
1420         if (rc) {
1421                 RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
1422                 return rc;
1423         }
1424
1425         /* The PCIe configuration space revision id indicate if mmio reg
1426          * read is disabled.
1427          */
1428         readless_supported =
1429                 !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1430                                & ENA_MMIO_DISABLE_REG_READ);
1431         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1432
1433         /* reset device */
1434         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1435         if (rc) {
1436                 RTE_LOG(ERR, PMD, "cannot reset device\n");
1437                 goto err_mmio_read_less;
1438         }
1439
1440         /* check FW version */
1441         rc = ena_com_validate_version(ena_dev);
1442         if (rc) {
1443                 RTE_LOG(ERR, PMD, "device version is too low\n");
1444                 goto err_mmio_read_less;
1445         }
1446
1447         ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
1448
1449         /* ENA device administration layer init */
1450         rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
1451         if (rc) {
1452                 RTE_LOG(ERR, PMD,
1453                         "cannot initialize ena admin queue with device\n");
1454                 goto err_mmio_read_less;
1455         }
1456
1457         /* To enable the MSI-X interrupts, the driver needs to know the
1458          * number of queues, so it uses polling mode to retrieve this
1459          * information.
1460          */
1461         ena_com_set_admin_polling_mode(ena_dev, true);
1462
1463         ena_config_host_info(ena_dev);
1464
1465         /* Get Device Attributes and features */
1466         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
1467         if (rc) {
1468                 RTE_LOG(ERR, PMD,
1469                         "cannot get attributes for ENA device, rc: %d\n", rc);
1470                 goto err_admin_init;
1471         }
1472
1473         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1474                       BIT(ENA_ADMIN_NOTIFICATION) |
1475                       BIT(ENA_ADMIN_KEEP_ALIVE) |
1476                       BIT(ENA_ADMIN_FATAL_ERROR) |
1477                       BIT(ENA_ADMIN_WARNING);
1478
1479         aenq_groups &= get_feat_ctx->aenq.supported_groups;
1480         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1481         if (rc) {
1482                 RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
1483                 goto err_admin_init;
1484         }
1485
1486         *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1487
1488         return 0;
1489
1490 err_admin_init:
1491         ena_com_admin_destroy(ena_dev);
1492
1493 err_mmio_read_less:
1494         ena_com_mmio_reg_read_request_destroy(ena_dev);
1495
1496         return rc;
1497 }
1498
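     /* Interrupt handler registered with the EAL: dispatches admin queue
      * completions and, unless the adapter is already closed, AENQ events.
      */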
1499 static void ena_interrupt_handler_rte(void *cb_arg)
1500 {
1501         struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
1502         struct ena_com_dev *ena_dev = &adapter->ena_dev;
1503
1504         ena_com_admin_q_comp_intr_handler(ena_dev);
1505         if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1506                 ena_com_aenq_intr_handler(ena_dev, adapter);
1507 }
1508
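     /* Request a device reset if no keep-alive AENQ event arrived within
      * the watchdog timeout.
      */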
1509 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
1510 {
1511         if (!adapter->wd_state)
1512                 return;
1513
1514         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
1515                 return;
1516
1517         if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
1518             adapter->keep_alive_timeout)) {
1519                 RTE_LOG(ERR, PMD, "Keep alive timeout\n");
1520                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
1521                 adapter->trigger_reset = true;
1522         }
1523 }
1524
1525 /* Check if admin queue is enabled */
1526 static void check_for_admin_com_state(struct ena_adapter *adapter)
1527 {
1528         if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
1529                 RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
1530                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
1531                 adapter->trigger_reset = true;
1532         }
1533 }
1534
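     /* Periodic watchdog callback: verifies keep-alive and admin queue
      * health and, on failure, asks the application to reset the port.
      */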
1535 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1536                                   void *arg)
1537 {
1538         struct ena_adapter *adapter = (struct ena_adapter *)arg;
1539         struct rte_eth_dev *dev = adapter->rte_dev;
1540
1541         check_for_missing_keep_alive(adapter);
1542         check_for_admin_com_state(adapter);
1543
1544         if (unlikely(adapter->trigger_reset)) {
1545                 RTE_LOG(ERR, PMD, "Trigger reset is on\n");
1546                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1547                         NULL);
1548         }
1549 }
1550
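     /* The usable number of IO queues is bounded by the smaller of the
      * submission and completion queue counts supported by the device.
      */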
1551 static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
1552                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
1553 {
1554         int io_sq_num, io_cq_num, io_queue_num;
1555
1556         io_sq_num = get_feat_ctx->max_queues.max_sq_num;
1557         io_cq_num = get_feat_ctx->max_queues.max_cq_num;
1558
1559         io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
1560
1561         if (unlikely(io_queue_num == 0)) {
1562                 RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
1563                 return -EFAULT;
1564         }
1565
1566         return io_queue_num;
1567 }
1568
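     /* Main ethdev init routine: maps the BARs, initializes the device and
      * its admin queue, derives queue counts and ring sizes from the device
      * features, prepares the rings and sets up interrupts, stats and the
      * watchdog timer.
      */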
1569 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
1570 {
1571         struct rte_pci_device *pci_dev;
1572         struct rte_intr_handle *intr_handle;
1573         struct ena_adapter *adapter =
1574                 (struct ena_adapter *)(eth_dev->data->dev_private);
1575         struct ena_com_dev *ena_dev = &adapter->ena_dev;
1576         struct ena_com_dev_get_features_ctx get_feat_ctx;
1577         int queue_size, rc;
1578         u16 tx_sgl_size = 0;
1579
1580         static int adapters_found;
1581         bool wd_state;
1582
1583         memset(adapter, 0, sizeof(struct ena_adapter));
1584         ena_dev = &adapter->ena_dev;
1585
1586         eth_dev->dev_ops = &ena_dev_ops;
1587         eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
1588         eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
1589         eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
1590         adapter->rte_eth_dev_data = eth_dev->data;
1591         adapter->rte_dev = eth_dev;
1592
1593         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1594                 return 0;
1595
1596         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1597         adapter->pdev = pci_dev;
1598
1599         PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
1600                      pci_dev->addr.domain,
1601                      pci_dev->addr.bus,
1602                      pci_dev->addr.devid,
1603                      pci_dev->addr.function);
1604
1605         intr_handle = &pci_dev->intr_handle;
1606
1607         adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
1608         adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
1609
1610         if (!adapter->regs) {
1611                 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
1612                              ENA_REGS_BAR);
1613                 return -ENXIO;
1614         }
1615
1616         ena_dev->reg_bar = adapter->regs;
1617         ena_dev->dmadev = adapter->pdev;
1618
1619         adapter->id_number = adapters_found;
1620
1621         snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
1622                  adapter->id_number);
1623
1624         /* device specific initialization routine */
1625         rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
1626         if (rc) {
1627                 PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1628                 goto err;
1629         }
1630         adapter->wd_state = wd_state;
1631
1632         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1633         adapter->num_queues = ena_calc_io_queue_num(ena_dev,
1634                                                     &get_feat_ctx);
1635
1636         queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
1637         if (queue_size <= 0 || adapter->num_queues <= 0) {
1638                 rc = -EFAULT;
1639                 goto err_device_destroy;
1640         }
1641
1642         adapter->tx_ring_size = queue_size;
1643         adapter->rx_ring_size = queue_size;
1644
1645         adapter->max_tx_sgl_size = tx_sgl_size;
1646
1647         /* prepare ring structures */
1648         ena_init_rings(adapter);
1649
1650         ena_config_debug_area(adapter);
1651
1652         /* Set max MTU for this device */
1653         adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
1654
1655         /* set device support for TSO */
1656         adapter->tso4_supported = get_feat_ctx.offload.tx &
1657                                   ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
1658
1659         /* Copy MAC address and point DPDK to it */
1660         eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
1661         ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
1662                         (struct ether_addr *)adapter->mac_addr);
1663
1664         /*
1665          * Pass the information to the rte_eth_dev_close() that it should also
1666          * release the private port resources.
1667          */
1668         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1669
1670         adapter->drv_stats = rte_zmalloc("adapter stats",
1671                                          sizeof(*adapter->drv_stats),
1672                                          RTE_CACHE_LINE_SIZE);
1673         if (!adapter->drv_stats) {
1674                 RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
1675                 rc = -ENOMEM;
1676                 goto err_delete_debug_area;
1677         }
1678
1679         rte_intr_callback_register(intr_handle,
1680                                    ena_interrupt_handler_rte,
1681                                    adapter);
1682         rte_intr_enable(intr_handle);
1683         ena_com_set_admin_polling_mode(ena_dev, false);
1684         ena_com_admin_aenq_enable(ena_dev);
1685
1686         if (adapters_found == 0)
1687                 rte_timer_subsystem_init();
1688         rte_timer_init(&adapter->timer_wd);
1689
1690         adapters_found++;
1691         adapter->state = ENA_ADAPTER_STATE_INIT;
1692
1693         return 0;
1694
1695 err_delete_debug_area:
1696         ena_com_delete_debug_area(ena_dev);
1697
1698 err_device_destroy:
1699         ena_com_delete_host_info(ena_dev);
1700         ena_com_admin_destroy(ena_dev);
1701
1702 err:
1703         return rc;
1704 }
1705
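     /* Reverse of eth_ena_dev_init: close the port if it is still open and
      * detach the burst callbacks (primary process only).
      */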
1706 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
1707 {
1708         struct ena_adapter *adapter =
1709                 (struct ena_adapter *)(eth_dev->data->dev_private);
1710
1711         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1712                 return 0;
1713
1714         if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1715                 ena_close(eth_dev);
1716
1717         eth_dev->dev_ops = NULL;
1718         eth_dev->rx_pkt_burst = NULL;
1719         eth_dev->tx_pkt_burst = NULL;
1720         eth_dev->tx_pkt_prepare = NULL;
1721
1722         adapter->state = ENA_ADAPTER_STATE_FREE;
1723
1724         return 0;
1725 }
1726
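     /* Record the offloads selected by the application; the queues
      * themselves are configured later in the queue setup callbacks.
      */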
1727 static int ena_dev_configure(struct rte_eth_dev *dev)
1728 {
1729         struct ena_adapter *adapter =
1730                 (struct ena_adapter *)(dev->data->dev_private);
1731
1732         adapter->state = ENA_ADAPTER_STATE_CONFIG;
1733
1734         adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1735         adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
1736         return 0;
1737 }
1738
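     /* Initialize the static fields of the Tx and Rx ring structures; the
      * IO queues are created when the application sets the queues up.
      */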
1739 static void ena_init_rings(struct ena_adapter *adapter)
1740 {
1741         int i;
1742
1743         for (i = 0; i < adapter->num_queues; i++) {
1744                 struct ena_ring *ring = &adapter->tx_ring[i];
1745
1746                 ring->configured = 0;
1747                 ring->type = ENA_RING_TYPE_TX;
1748                 ring->adapter = adapter;
1749                 ring->id = i;
1750                 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
1751                 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
1752                 ring->sgl_size = adapter->max_tx_sgl_size;
1753         }
1754
1755         for (i = 0; i < adapter->num_queues; i++) {
1756                 struct ena_ring *ring = &adapter->rx_ring[i];
1757
1758                 ring->configured = 0;
1759                 ring->type = ENA_RING_TYPE_RX;
1760                 ring->adapter = adapter;
1761                 ring->id = i;
1762         }
1763 }
1764
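     /* Report device capabilities to the ethdev layer: link speeds,
      * supported Rx/Tx offloads, queue counts and descriptor limits.
      */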
1765 static void ena_infos_get(struct rte_eth_dev *dev,
1766                           struct rte_eth_dev_info *dev_info)
1767 {
1768         struct ena_adapter *adapter;
1769         struct ena_com_dev *ena_dev;
1770         struct ena_com_dev_get_features_ctx feat;
1771         uint64_t rx_feat = 0, tx_feat = 0;
1772         int rc = 0;
1773
1774         ena_assert_msg(dev->data != NULL, "Uninitialized device");
1775         ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
1776         adapter = (struct ena_adapter *)(dev->data->dev_private);
1777
1778         ena_dev = &adapter->ena_dev;
1779         ena_assert_msg(ena_dev != NULL, "Uninitialized device");
1780
1781         dev_info->speed_capa =
1782                         ETH_LINK_SPEED_1G   |
1783                         ETH_LINK_SPEED_2_5G |
1784                         ETH_LINK_SPEED_5G   |
1785                         ETH_LINK_SPEED_10G  |
1786                         ETH_LINK_SPEED_25G  |
1787                         ETH_LINK_SPEED_40G  |
1788                         ETH_LINK_SPEED_50G  |
1789                         ETH_LINK_SPEED_100G;
1790
1791         /* Get supported features from HW */
1792         rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
1793         if (unlikely(rc)) {
1794                 RTE_LOG(ERR, PMD,
1795                         "Cannot get attributes for ENA device, rc: %d\n", rc);
1796                 return;
1797         }
1798
1799         /* Set Tx & Rx features available for device */
1800         if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
1801                 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
1802
1803         if (feat.offload.tx &
1804             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
1805                 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1806                         DEV_TX_OFFLOAD_UDP_CKSUM |
1807                         DEV_TX_OFFLOAD_TCP_CKSUM;
1808
1809         if (feat.offload.rx_supported &
1810             ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
1811                 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1812                         DEV_RX_OFFLOAD_UDP_CKSUM  |
1813                         DEV_RX_OFFLOAD_TCP_CKSUM;
1814
1815         rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1816
1817         /* Inform framework about available features */
1818         dev_info->rx_offload_capa = rx_feat;
1819         dev_info->rx_queue_offload_capa = rx_feat;
1820         dev_info->tx_offload_capa = tx_feat;
1821         dev_info->tx_queue_offload_capa = tx_feat;
1822
1823         dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
1824         dev_info->max_rx_pktlen  = adapter->max_mtu;
1825         dev_info->max_mac_addrs = 1;
1826
1827         dev_info->max_rx_queues = adapter->num_queues;
1828         dev_info->max_tx_queues = adapter->num_queues;
1829         dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
1830
1831         adapter->tx_supported_offloads = tx_feat;
1832         adapter->rx_supported_offloads = rx_feat;
1833
1834         dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
1835         dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
1836
1837         dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
1838         dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
1839         dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
1840                                         feat.max_queues.max_packet_tx_descs);
1841         dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
1842                                         feat.max_queues.max_packet_tx_descs);
1843 }
1844
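     /* Rx burst function: drains completed descriptors from the IO
      * completion queue, chains multi-segment packets, and refills the ring
      * in batches to amortize doorbells and memory barriers.
      */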
1845 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1846                                   uint16_t nb_pkts)
1847 {
1848         struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
1849         unsigned int ring_size = rx_ring->ring_size;
1850         unsigned int ring_mask = ring_size - 1;
1851         uint16_t next_to_clean = rx_ring->next_to_clean;
1852         uint16_t desc_in_use = 0;
1853         uint16_t req_id;
1854         unsigned int recv_idx = 0;
1855         struct rte_mbuf *mbuf = NULL;
1856         struct rte_mbuf *mbuf_head = NULL;
1857         struct rte_mbuf *mbuf_prev = NULL;
1858         struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
1859         unsigned int completed;
1860
1861         struct ena_com_rx_ctx ena_rx_ctx;
1862         int rc = 0;
1863
1864         /* Check adapter state */
1865         if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
1866                 RTE_LOG(ALERT, PMD,
1867                         "Trying to receive pkts while device is NOT running\n");
1868                 return 0;
1869         }
1870
1871         desc_in_use = rx_ring->next_to_use - next_to_clean;
1872         if (unlikely(nb_pkts > desc_in_use))
1873                 nb_pkts = desc_in_use;
1874
1875         for (completed = 0; completed < nb_pkts; completed++) {
1876                 int segments = 0;
1877
1878                 ena_rx_ctx.max_bufs = rx_ring->ring_size;
1879                 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1880                 ena_rx_ctx.descs = 0;
1881                 /* receive packet context */
1882                 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1883                                     rx_ring->ena_com_io_sq,
1884                                     &ena_rx_ctx);
1885                 if (unlikely(rc)) {
1886                         RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
1887                         rx_ring->adapter->trigger_reset = true;
1888                         return 0;
1889                 }
1890
1891                 if (unlikely(ena_rx_ctx.descs == 0))
1892                         break;
1893
1894                 while (segments < ena_rx_ctx.descs) {
1895                         req_id = ena_rx_ctx.ena_bufs[segments].req_id;
1896                         rc = validate_rx_req_id(rx_ring, req_id);
1897                         if (unlikely(rc))
1898                                 break;
1899
1900                         mbuf = rx_buff_info[req_id];
1901                         mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
1902                         mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1903                         mbuf->refcnt = 1;
1904                         mbuf->next = NULL;
1905                         if (unlikely(segments == 0)) {
1906                                 mbuf->nb_segs = ena_rx_ctx.descs;
1907                                 mbuf->port = rx_ring->port_id;
1908                                 mbuf->pkt_len = 0;
1909                                 mbuf_head = mbuf;
1910                         } else {
1911                                 /* for multi-segment pkts create mbuf chain */
1912                                 mbuf_prev->next = mbuf;
1913                         }
1914                         mbuf_head->pkt_len += mbuf->data_len;
1915
1916                         mbuf_prev = mbuf;
1917                         rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
1918                                 req_id;
1919                         segments++;
1920                         next_to_clean++;
1921                 }
1922
1923                 /* fill mbuf attributes if any */
1924                 ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
1925                 mbuf_head->hash.rss = ena_rx_ctx.hash;
1926
1927                 /* pass to DPDK application head mbuf */
1928                 rx_pkts[recv_idx] = mbuf_head;
1929                 recv_idx++;
1930         }
1931
1932         rx_ring->next_to_clean = next_to_clean;
1933
1934         desc_in_use = desc_in_use - completed + 1;
1935         /* Burst refill to save doorbells, memory barriers, const interval */
1936         if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
1937                 ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
1938
1939         return recv_idx;
1940 }
1941
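     /* Tx prepare callback: verifies that the requested offloads are
      * supported and computes the pseudo-header checksum where the hardware
      * expects a partial checksum.
      */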
1942 static uint16_t
1943 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
1944                 uint16_t nb_pkts)
1945 {
1946         int32_t ret;
1947         uint32_t i;
1948         struct rte_mbuf *m;
1949         struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
1950         struct ipv4_hdr *ip_hdr;
1951         uint64_t ol_flags;
1952         uint16_t frag_field;
1953
1954         for (i = 0; i != nb_pkts; i++) {
1955                 m = tx_pkts[i];
1956                 ol_flags = m->ol_flags;
1957
1958                 if (!(ol_flags & PKT_TX_IPV4))
1959                         continue;
1960
1961                 /* If the L2 header length was not specified, assume it is
1962                  * the length of the Ethernet header.
1963                  */
1964                 if (unlikely(m->l2_len == 0))
1965                         m->l2_len = sizeof(struct ether_hdr);
1966
1967                 ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1968                                                  m->l2_len);
1969                 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
1970
1971                 if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
1972                         m->packet_type |= RTE_PTYPE_L4_NONFRAG;
1973
1974                         /* If the IPv4 header has the DF flag set and TSO is
1975                          * not supported, the partial checksum should not be calculated.
1976                          */
1977                         if (!tx_ring->adapter->tso4_supported)
1978                                 continue;
1979                 }
1980
1981                 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
1982                                 (ol_flags & PKT_TX_L4_MASK) ==
1983                                 PKT_TX_SCTP_CKSUM) {
1984                         rte_errno = ENOTSUP; /* rte_errno takes positive errno values */
1985                         return i;
1986                 }
1987
1988 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1989                 ret = rte_validate_tx_offload(m);
1990                 if (ret != 0) {
1991                         rte_errno = -ret; /* ret is a negative errno */
1992                         return i;
1993                 }
1994 #endif
1995
1996                 /* When TSO is requested and the DF flag is not set (DF=0),
1997                  * the hardware must be provided with a partial checksum;
1998                  * otherwise it takes care of the necessary calculations.
1999                  */
2000
2001                 ret = rte_net_intel_cksum_flags_prepare(m,
2002                         ol_flags & ~PKT_TX_TCP_SEG);
2003                 if (ret != 0) {
2004                         rte_errno = -ret; /* ret is a negative errno */
2005                         return i;
2006                 }
2007         }
2008
2009         return i;
2010 }
2011
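     /* Apply timeout hints delivered by the device via an AENQ
      * notification.
      */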
2012 static void ena_update_hints(struct ena_adapter *adapter,
2013                              struct ena_admin_ena_hw_hints *hints)
2014 {
2015         if (hints->admin_completion_tx_timeout)
2016                 adapter->ena_dev.admin_queue.completion_timeout =
2017                         hints->admin_completion_tx_timeout * 1000;
2018
2019         if (hints->mmio_read_timeout)
2020                 /* convert to usec */
2021                 adapter->ena_dev.mmio_read.reg_read_to =
2022                         hints->mmio_read_timeout * 1000;
2023
2024         if (hints->driver_watchdog_timeout) {
2025                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2026                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2027                 else
2028                         /* Convert msecs to ticks */
2029                         adapter->keep_alive_timeout =
2030                                 (hints->driver_watchdog_timeout *
2031                                 rte_get_timer_hz()) / 1000;
2032         }
2033 }
2034
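     /* Linearize the mbuf chain when its segment count reaches the Tx
      * scatter-gather list limit.
      */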
2035 static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
2036                                         struct rte_mbuf *mbuf)
2037 {
2038         int num_segments, rc;
2039
2040         num_segments = mbuf->nb_segs;
2041
2042         if (likely(num_segments < tx_ring->sgl_size))
2043                 return 0;
2044
2045         rc = rte_pktmbuf_linearize(mbuf);
2046         if (unlikely(rc))
2047                 RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
2048
2049         return rc;
2050 }
2051
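     /* Tx burst function: builds a Tx context for each mbuf chain, writes
      * the descriptors to the IO submission queue, rings the doorbell once
      * per burst and then reclaims completed requests, bounded per call so
      * cleanup does not block transmission for too long.
      */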
2052 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2053                                   uint16_t nb_pkts)
2054 {
2055         struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2056         uint16_t next_to_use = tx_ring->next_to_use;
2057         uint16_t next_to_clean = tx_ring->next_to_clean;
2058         struct rte_mbuf *mbuf;
2059         unsigned int ring_size = tx_ring->ring_size;
2060         unsigned int ring_mask = ring_size - 1;
2061         struct ena_com_tx_ctx ena_tx_ctx;
2062         struct ena_tx_buffer *tx_info;
2063         struct ena_com_buf *ebuf;
2064         uint16_t rc, req_id, total_tx_descs = 0;
2065         uint16_t sent_idx = 0, empty_tx_reqs;
2066         int nb_hw_desc;
2067
2068         /* Check adapter state */
2069         if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
2070                 RTE_LOG(ALERT, PMD,
2071                         "Trying to xmit pkts while device is NOT running\n");
2072                 return 0;
2073         }
2074
2075         empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
2076         if (nb_pkts > empty_tx_reqs)
2077                 nb_pkts = empty_tx_reqs;
2078
2079         for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
2080                 mbuf = tx_pkts[sent_idx];
2081
2082                 rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
2083                 if (unlikely(rc))
2084                         break;
2085
2086                 req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
2087                 tx_info = &tx_ring->tx_buffer_info[req_id];
2088                 tx_info->mbuf = mbuf;
2089                 tx_info->num_of_bufs = 0;
2090                 ebuf = tx_info->bufs;
2091
2092                 /* Prepare TX context */
2093                 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2094                 memset(&ena_tx_ctx.ena_meta, 0x0,
2095                        sizeof(struct ena_com_tx_meta));
2096                 ena_tx_ctx.ena_bufs = ebuf;
2097                 ena_tx_ctx.req_id = req_id;
2098                 if (tx_ring->tx_mem_queue_type ==
2099                                 ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2100                         /* prepare the push buffer with
2101                          * virtual address of the data
2102                          */
2103                         ena_tx_ctx.header_len =
2104                                 RTE_MIN(mbuf->data_len,
2105                                         tx_ring->tx_max_header_size);
2106                         ena_tx_ctx.push_header =
2107                                 (void *)((char *)mbuf->buf_addr +
2108                                          mbuf->data_off);
2109                 } /* there's no else as we take advantage of memset zeroing */
2110
2111                 /* Set TX offloads flags, if applicable */
2112                 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
2113
2114                 if (unlikely(mbuf->ol_flags &
2115                              (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
2116                         rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
2117
2118                 rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
2119
2120                 /* Process first segment taking into
2121                  * consideration pushed header
2122                  */
2123                 if (mbuf->data_len > ena_tx_ctx.header_len) {
2124                         ebuf->paddr = mbuf->buf_iova +
2125                                       mbuf->data_off +
2126                                       ena_tx_ctx.header_len;
2127                         ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
2128                         ebuf++;
2129                         tx_info->num_of_bufs++;
2130                 }
2131
2132                 while ((mbuf = mbuf->next) != NULL) {
2133                         ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
2134                         ebuf->len = mbuf->data_len;
2135                         ebuf++;
2136                         tx_info->num_of_bufs++;
2137                 }
2138
2139                 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2140
2141                 /* Write data to device */
2142                 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
2143                                         &ena_tx_ctx, &nb_hw_desc);
2144                 if (unlikely(rc))
2145                         break;
2146
2147                 tx_info->tx_descs = nb_hw_desc;
2148
2149                 next_to_use++;
2150         }
2151
2152         /* If there are ready packets to be xmitted... */
2153         if (sent_idx > 0) {
2154                 /* ...let HW do its best :-) */
2155                 rte_wmb();
2156                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2157
2158                 tx_ring->next_to_use = next_to_use;
2159         }
2160
2161         /* Clear complete packets  */
2162         while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
2163                 rc = validate_tx_req_id(tx_ring, req_id);
2164                 if (rc)
2165                         break;
2166
2167                 /* Get Tx info & store how many descs were processed  */
2168                 tx_info = &tx_ring->tx_buffer_info[req_id];
2169                 total_tx_descs += tx_info->tx_descs;
2170
2171                 /* Free whole mbuf chain  */
2172                 mbuf = tx_info->mbuf;
2173                 rte_pktmbuf_free(mbuf);
2174                 tx_info->mbuf = NULL;
2175
2176                 /* Put back descriptor to the ring for reuse */
2177                 tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
2178                 next_to_clean++;
2179
2180                 /* If too many descs to clean, leave it for another run */
2181                 if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
2182                         break;
2183         }
2184
2185         if (total_tx_descs > 0) {
2186                 /* acknowledge completion of sent packets */
2187                 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
2188                 tx_ring->next_to_clean = next_to_clean;
2189         }
2190
2191         return sent_idx;
2192 }
2193
2194 /*********************************************************************
2195  *  PMD configuration
2196  *********************************************************************/
2197 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2198         struct rte_pci_device *pci_dev)
2199 {
2200         return rte_eth_dev_pci_generic_probe(pci_dev,
2201                 sizeof(struct ena_adapter), eth_ena_dev_init);
2202 }
2203
2204 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2205 {
2206         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2207 }
2208
2209 static struct rte_pci_driver rte_ena_pmd = {
2210         .id_table = pci_id_ena_map,
2211         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
2212                      RTE_PCI_DRV_WC_ACTIVATE,
2213         .probe = eth_ena_pci_probe,
2214         .remove = eth_ena_pci_remove,
2215 };
2216
2217 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
2218 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
2219 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
2220
2221 RTE_INIT(ena_init_log)
2222 {
2223         ena_logtype_init = rte_log_register("pmd.net.ena.init");
2224         if (ena_logtype_init >= 0)
2225                 rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
2226         ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
2227         if (ena_logtype_driver >= 0)
2228                 rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
2229 }
2230
2231 /******************************************************************************
2232  ******************************** AENQ Handlers *******************************
2233  *****************************************************************************/
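     /* AENQ handler: propagate a link status change reported by the device
      * to the ethdev layer and notify the application via the LSC callback.
      */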
2234 static void ena_update_on_link_change(void *adapter_data,
2235                                       struct ena_admin_aenq_entry *aenq_e)
2236 {
2237         struct rte_eth_dev *eth_dev;
2238         struct ena_adapter *adapter;
2239         struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2240         uint32_t status;
2241
2242         adapter = (struct ena_adapter *)adapter_data;
2243         aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2244         eth_dev = adapter->rte_dev;
2245
2246         status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2247         adapter->link_status = status;
2248
2249         ena_link_update(eth_dev, 0);
2250         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2251 }
2252
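     /* AENQ handler for notification events; only HW hints updates are
      * currently supported.
      */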
2253 static void ena_notification(void *data,
2254                              struct ena_admin_aenq_entry *aenq_e)
2255 {
2256         struct ena_adapter *adapter = (struct ena_adapter *)data;
2257         struct ena_admin_ena_hw_hints *hints;
2258
2259         if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
2260                 RTE_LOG(WARNING, PMD, "Invalid group (%x), expected %x\n",
2261                         aenq_e->aenq_common_desc.group,
2262                         ENA_ADMIN_NOTIFICATION);
2263
2264         switch (aenq_e->aenq_common_desc.syndrom) {
2265         case ENA_ADMIN_UPDATE_HINTS:
2266                 hints = (struct ena_admin_ena_hw_hints *)
2267                         (&aenq_e->inline_data_w4);
2268                 ena_update_hints(adapter, hints);
2269                 break;
2270         default:
2271                 RTE_LOG(ERR, PMD, "Invalid AENQ notification syndrome %d\n",
2272                         aenq_e->aenq_common_desc.syndrom);
2273         }
2274 }
2275
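     /* AENQ keep-alive handler: record the timestamp checked by the
      * watchdog.
      */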
2276 static void ena_keep_alive(void *adapter_data,
2277                            __rte_unused struct ena_admin_aenq_entry *aenq_e)
2278 {
2279         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2280
2281         adapter->timestamp_wd = rte_get_timer_cycles();
2282 }
2283
2284 /**
2285  * This handler is called for unknown event groups or unimplemented handlers.
2286  **/
2287 static void unimplemented_aenq_handler(__rte_unused void *data,
2288                                        __rte_unused struct ena_admin_aenq_entry *aenq_e)
2289 {
2290         RTE_LOG(ERR, PMD, "Unknown event was received or event with "
2291                           "unimplemented handler\n");
2292 }
2293
2294 static struct ena_aenq_handlers aenq_handlers = {
2295         .handlers = {
2296                 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
2297                 [ENA_ADMIN_NOTIFICATION] = ena_notification,
2298                 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
2299         },
2300         .unimplemented_handler = unimplemented_aenq_handler
2301 };