/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */
6 #include <vnet/dev/dev.h>
8 #include <dev_ena/ena.h>
9 #include <dev_ena/ena_inlines.h>
11 #define ENA_AENQ_POLL_INTERVAL 0.2
13 VLIB_REGISTER_LOG_CLASS (ena_log, static) = {
15 .subclass_name = "aenq",
19 ena_aenq_free (vlib_main_t *vm, vnet_dev_t *dev)
21 ena_device_t *ed = vnet_dev_get_data (dev);
25 ASSERT (ed->aenq_started == 0);
27 vnet_dev_dma_mem_free (vm, dev, ed->aenq.entries);
33 ena_aenq_olloc (vlib_main_t *vm, vnet_dev_t *dev, u16 depth)
35 ena_device_t *ed = vnet_dev_get_data (dev);
36 u32 alloc_sz = sizeof (ena_aenq_entry_t) * depth;
41 ASSERT (ed->aenq.entries == 0);
43 if ((rv = vnet_dev_dma_mem_alloc (vm, dev, alloc_sz, 0,
44 (void **) &ed->aenq.entries)))
47 ed->aenq.depth = depth;
51 ena_aenq_free (vm, dev);
55 static ena_aenq_entry_t *
56 ena_get_next_aenq_entry (vnet_dev_t *dev)
58 ena_device_t *ed = vnet_dev_get_data (dev);
59 u16 index = ed->aenq.head & pow2_mask (ENA_ASYNC_QUEUE_LOG2_DEPTH);
60 u16 phase = 1 & (ed->aenq.head >> ENA_ASYNC_QUEUE_LOG2_DEPTH);
61 ena_aenq_entry_t *e = ed->aenq.entries + index;
63 if (e->phase != phase)
72 ena_aenq_poll (vlib_main_t *vm, vnet_dev_t *dev)
76 while ((ae = ena_get_next_aenq_entry (dev)))
78 ena_device_t *ed = vnet_dev_get_data (dev);
79 vnet_dev_port_state_changes_t changes = {};
81 log_debug (dev, "aenq: group %u syndrome %u phase %u timestamp %lu",
82 ae->group, ae->syndrome, ae->phase, ae->timestamp);
86 case ENA_AENQ_GROUP_LINK_CHANGE:
87 log_debug (dev, "link_change: status %u",
88 ae->link_change.link_status);
89 changes.link_state = 1;
90 changes.change.link_state = 1;
91 foreach_vnet_dev_port (p, dev)
92 vnet_dev_port_state_change (vm, p, changes);
95 case ENA_AENQ_GROUP_NOTIFICATION:
96 log_warn (dev, "unhandled AENQ notification received [syndrome %u]",
100 case ENA_AENQ_GROUP_KEEP_ALIVE:
101 if (ae->keep_alive.rx_drops || ae->keep_alive.tx_drops)
102 log_debug (dev, "keep_alive: rx_drops %lu tx_drops %lu",
103 ae->keep_alive.rx_drops, ae->keep_alive.tx_drops);
104 ed->aenq.rx_drops = ae->keep_alive.rx_drops - ed->aenq.rx_drops0;
105 ed->aenq.tx_drops = ae->keep_alive.tx_drops - ed->aenq.tx_drops0;
106 ed->aenq.last_keepalive = vlib_time_now (vm);
110 log_debug (dev, "unknown aenq entry (group %u) %U", ae->group,
111 format_hexdump, ae, sizeof (*ae));
117 ena_aenq_start (vlib_main_t *vm, vnet_dev_t *dev)
119 ena_device_t *ed = vnet_dev_get_data (dev);
120 u16 depth = ed->aenq.depth;
121 u32 alloc_sz = sizeof (ena_aenq_entry_t) * depth;
123 ASSERT (ed->aenq_started == 0);
124 ASSERT (ed->aq_started == 1);
126 ena_reg_aenq_caps_t aenq_caps = {
128 .entry_size = sizeof (ena_aenq_entry_t),
131 if (ena_aq_feature_is_supported (dev, ENA_ADMIN_FEAT_ID_AENQ_CONFIG))
133 ena_aq_feat_aenq_config_t aenq;
136 if ((rv = ena_aq_get_feature (vm, dev, ENA_ADMIN_FEAT_ID_AENQ_CONFIG,
139 log_err (dev, "aenq_start: get_Feature(AENQ_CONFIG) failed");
143 aenq.enabled_groups.link_change = 1;
144 aenq.enabled_groups.fatal_error = 1;
145 aenq.enabled_groups.warning = 1;
146 aenq.enabled_groups.notification = 1;
147 aenq.enabled_groups.keep_alive = 1;
148 aenq.enabled_groups.as_u32 &= aenq.supported_groups.as_u32;
149 aenq.supported_groups.as_u32 = 0;
151 if ((rv = ena_aq_set_feature (vm, dev, ENA_ADMIN_FEAT_ID_AENQ_CONFIG,
154 log_err (dev, "aenq_start: set_Feature(AENQ_CONFIG) failed");
159 clib_memset (ed->aenq.entries, 0, alloc_sz);
160 ed->aenq.head = depth;
162 ena_reg_set_dma_addr (vm, dev, ENA_REG_AENQ_BASE_LO, ENA_REG_AENQ_BASE_HI,
165 ena_reg_write (dev, ENA_REG_AENQ_CAPS, &aenq_caps);
166 ena_reg_write (dev, ENA_REG_AENQ_HEAD_DB, &(u32){ depth });
168 ed->aenq_started = 1;
170 vnet_dev_poll_dev_add (vm, dev, ENA_AENQ_POLL_INTERVAL, ena_aenq_poll);
176 ena_aenq_stop (vlib_main_t *vm, vnet_dev_t *dev)
178 ena_device_t *ed = vnet_dev_get_data (dev);
179 if (ed->aenq_started == 1)
181 ena_reg_aenq_caps_t aenq_caps = {};
182 vnet_dev_poll_dev_remove (vm, dev, ena_aenq_poll);
183 ena_reg_write (dev, ENA_REG_AENQ_CAPS, &aenq_caps);
184 ed->aenq_started = 0;