1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2023 Cisco Systems, Inc.
7 #include <vnet/dev/dev.h>
8 #include <vnet/dev/pci.h>
9 #include <vnet/dev/counters.h>
10 #include <dev_iavf/iavf.h>
11 #include <dev_iavf/iavf_regs.h>
12 #include <dev_iavf/virtchnl.h>
13 #include <vnet/ethernet/ethernet.h>
15 #define IIAVF_AQ_LARGE_BUF 512
16 #define IIAVF_AQ_ATQ_LEN 4
17 #define IIAVF_AQ_ARQ_LEN 16
/* Log class for this file; messages from the log_debug/log_err calls
   below appear under the "adminq" subclass.
   NOTE(review): remaining initializer lines are elided in this view. */
19 VLIB_REGISTER_LOG_CLASS (iavf_log, static) = {
21 .subclass_name = "adminq",
/* Single DMA-coherent allocation holding both admin queue descriptor
   rings plus one data buffer per ring slot (allocated in iavf_aq_alloc,
   mapped via vnet_dev_get_dma_addr by the slot-init/enqueue code).
   NOTE(review): struct/typedef framing lines are elided in this view. */
24 struct iavf_adminq_dma_mem
26 iavf_aq_desc_t atq[IIAVF_AQ_ATQ_LEN]; /* admin send (ATQ) descriptor ring */
27 iavf_aq_desc_t arq[IIAVF_AQ_ARQ_LEN]; /* admin receive (ARQ) descriptor ring */
30 u8 data[IIAVF_AQ_BUF_SIZE];
31 } atq_bufs[IIAVF_AQ_ATQ_LEN]; /* one payload buffer per ATQ slot */
34 u8 data[IIAVF_AQ_BUF_SIZE];
35 } arq_bufs[IIAVF_AQ_ARQ_LEN]; /* one payload buffer per ARQ slot */
/* Precomputed register images for VFINT_DYN_CTL0 / VFINT_ICR0_ENA1,
   written as raw u32 by the irq enable/disable helpers and the MSI-X
   handler below.
   NOTE(review): initializer bodies are elided in this view. */
38 static const iavf_dyn_ctl dyn_ctl0_disable = {
42 static const iavf_dyn_ctl dyn_ctl0_enable = {
48 static const iavf_vfint_icr0_ena1 icr0_ena1_aq_enable = {
/* Mask admin-queue interrupt 0: clear the ICR0 cause-enable mask, then
   write the precomputed "disable" pattern into DYN_CTL0. */
53 iavf_irq_0_disable (iavf_device_t *ad)
55 iavf_reg_write (ad, IAVF_VFINT_ICR0_ENA1, 0);
56 iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_disable.as_u32);
/* Unmask admin-queue interrupt 0: enable the adminq cause in ICR0_ENA1,
   then arm DYN_CTL0 with the precomputed "enable" pattern. */
61 iavf_irq_0_enable (iavf_device_t *ad)
63 iavf_reg_write (ad, IAVF_VFINT_ICR0_ENA1, icr0_ena1_aq_enable.as_u32);
64 iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_enable.as_u32);
/* Predicate: has hardware completed this admin queue descriptor?
   The acquire load orders the flag read before any subsequent reads of
   the descriptor/buffer contents written by the device via DMA.
   NOTE(review): the flag test and return statement are elided in this
   view — presumably checks the DD ("descriptor done") bit; confirm. */
68 static_always_inline int
69 iavf_aq_desc_is_done (iavf_aq_desc_t *d)
71 iavf_aq_desc_flags_t flags;
72 flags.as_u16 = __atomic_load_n (&d->flags.as_u16, __ATOMIC_ACQUIRE);
/* vlib format() helper: renders the set bits of an
   iavf_aq_desc_flags_t as upper-cased flag names generated from
   foreach_iavf_aq_desc_flag.
   NOTE(review): parts of the per-flag macro body (separator handling,
   macro begin/end, return) are elided in this view; no comments are
   inserted between the backslash-continued macro lines below. */
77 format_iavf_aq_desc_flags (u8 *s, va_list *args)
79 iavf_aq_desc_flags_t f = va_arg (*args, iavf_aq_desc_flags_t);
85 char str[] = #v, *sp = str; \
92 vec_add1 (s, (u8) toupper (sp++[0])); \
94 foreach_iavf_aq_desc_flag
/* vlib format() helper: maps an admin queue return value to its
   symbolic name taken from foreach_iavf_aq_desc_retval, falling back
   to "UNKNOWN(n)" for out-of-range or unnamed codes.
   NOTE(review): the table declaration framing is elided in this view. */
100 format_iavf_aq_desc_retval (u8 *s, va_list *args)
102 iavf_aq_desc_retval_t rv = va_arg (*args, u32);
105 #define _(a, b) [a] = #b,
106 foreach_iavf_aq_desc_retval
/* sparse table: entries not named by the foreach macro stay NULL */
110 if (rv >= ARRAY_LEN (retvals) || retvals[rv] == 0)
111 return format (s, "UNKNOWN(%d)", rv);
113 return format (s, "%s", retvals[rv]);
/* vlib format() helper for one admin queue descriptor. Always prints
   opcode, datalen, retval and flags; virtchnl send/receive descriptors
   additionally get a decoded second line (v_opcode, v_retval, buffer
   DMA address), all other opcodes get raw cookie and parameter words.
   NOTE(review): some statement framing (braces, s = assignments) is
   elided in this view. */
117 format_iavf_aq_desc (u8 *s, va_list *args)
119 iavf_aq_desc_t *d = va_arg (*args, iavf_aq_desc_t *);
120 u32 indent = format_get_indent (s);
122 s = format (s, "opcode 0x%04x datalen %u retval %U (%u) flags %U", d->opcode,
123 d->datalen, format_iavf_aq_desc_retval, d->retval, d->retval,
124 format_iavf_aq_desc_flags, d->flags);
/* virtchnl messages carry opcode/status in v_opcode/v_retval and the
   64-bit buffer address split across param2 (high) and param3 (low) */
126 if (d->opcode == IIAVF_AQ_DESC_OP_SEND_TO_PF ||
127 d->opcode == IIAVF_AQ_DESC_OP_MESSAGE_FROM_PF)
130 format (s, "\n%Uv_opcode %U (%u) v_retval %U (%d) buf_dma_addr 0x%lx",
131 format_white_space, indent, format_virtchnl_op_name,
132 d->v_opcode, d->v_opcode, format_virtchnl_status, d->v_retval,
133 d->v_retval, (uword) d->param2 << 32 | d->param3);
/* non-virtchnl descriptors: dump raw cookie and parameter words */
138 s, "\n%Ucookie_hi 0x%x cookie_lo 0x%x params %08x %08x %08x %08x",
139 format_white_space, indent, d->cookie_hi, d->cookie_lo, d->param0,
140 d->param1, d->param2, d->param3);
/* Allocate the DMA-coherent admin queue memory (both rings plus all
   slot buffers, see iavf_adminq_dma_mem) and store the CPU mapping in
   the per-device data. Returns the vnet_dev allocator's error code. */
146 iavf_aq_alloc (vlib_main_t *vm, vnet_dev_t *dev)
148 iavf_device_t *ad = vnet_dev_get_data (dev);
149 return vnet_dev_dma_mem_alloc (vm, dev, sizeof (iavf_adminq_dma_mem_t), 0,
150 (void **) &ad->aq_mem);
/* Release the DMA memory obtained by iavf_aq_alloc. */
154 iavf_aq_free (vlib_main_t *vm, vnet_dev_t *dev)
156 iavf_device_t *ad = vnet_dev_get_data (dev);
157 vnet_dev_dma_mem_free (vm, dev, ad->aq_mem);
/* Re-arm one ARQ slot: rewrite its descriptor to point at the slot's
   DMA buffer so hardware can deposit the next PF message into it.
   NOTE(review): some initializer lines (e.g. addr_lo, the buf flag)
   are elided in this view. */
161 iavf_aq_arq_slot_init (vlib_main_t *vm, vnet_dev_t *dev, u16 slot)
163 iavf_device_t *ad = vnet_dev_get_data (dev);
164 u64 pa = vnet_dev_get_dma_addr (vm, dev, ad->aq_mem->arq_bufs + slot);
165 ad->aq_mem->arq[slot] = (iavf_aq_desc_t){
167 .flags.lb = IIAVF_AQ_BUF_SIZE > IIAVF_AQ_LARGE_BUF, /* "large buffer" bit when buffer exceeds 512B */
168 .datalen = sizeof (ad->aq_mem->arq_bufs[0].data),
169 .addr_hi = (u32) (pa >> 32),
/* Process-context poll handler for the admin receive queue (ARQ).
   Drains all completed descriptors without blocking (timeout 0),
   collects virtchnl PF events into ad->events, then processes them —
   currently LINK_CHANGE events are translated into vnet_dev port
   state-change notifications.
   NOTE(review): several lines are elided in this view (local variable
   declarations, braces, some error/continue paths). */
175 iavf_aq_poll (vlib_main_t *vm, vnet_dev_t *dev)
177 iavf_device_t *ad = vnet_dev_get_data (dev);
/* drain every completed ARQ descriptor; 0 timeout = non-blocking */
181 while (iavf_aq_arq_next_acq (vm, dev, &d, &b, 0))
184 log_debug (dev, "poll[%u] flags %x %U op %u v_op %u", ad->arq_next_slot,
185 d->flags.as_u16, format_iavf_aq_desc_flags, d->flags,
186 d->opcode, d->v_opcode);
/* only fixed-size PF events with an attached buffer are expected */
187 if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
188 ((d->flags.buf) == 0))
190 log_err (dev, "event message error");
/* copy the event out before the slot is re-armed for hardware */
193 vec_add1 (ad->events, *(virtchnl_pf_event_t *) b);
194 iavf_aq_arq_next_rel (vm, dev);
197 if (vec_len (ad->events))
199 virtchnl_pf_event_t *e;
/* event-code -> name table generated from foreach_virtchnl_event_code */
200 char *virtchnl_event_names[] = {
201 #define _(v, n) [v] = #n,
202 foreach_virtchnl_event_code
206 vec_foreach (e, ad->events)
208 log_debug (dev, "event %s (%u) sev %d",
209 virtchnl_event_names[e->event], e->event, e->severity);
211 if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
213 vnet_dev_port_state_changes_t changes = {};
214 vnet_dev_port_t *port = vnet_dev_get_port_by_id (dev, 0);
218 iavf_port_t *ap = vnet_dev_get_port_data (port);
/* ADV_LINK_SPEED-capable PFs report the speed value directly */
222 if (ap->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
224 link_up = e->event_data.link_event_adv.link_status;
225 speed = e->event_data.link_event_adv.link_speed;
/* legacy PFs report a one-hot speed bitmap; translate via table */
229 const u32 speed_table[8] = { 100, 1000, 10000, 40000,
230 20000, 25000, 2500, 5000 };
232 link_up = e->event_data.link_event.link_status;
233 speed = e->event_data.link_event.link_speed;
/* exactly one known bit set -> table lookup; otherwise rejected
   (NOTE(review): the upper-bound check and error path are elided) */
235 if (count_set_bits (speed) == 1 && speed &&
237 speed = speed_table[get_lowest_set_bit_index (speed)];
242 "unsupported link speed value "
249 log_debug (dev, "LINK_CHANGE speed %u state %u", speed,
/* report only actual deltas to the vnet_dev framework */
252 if (port->link_up != link_up)
254 changes.change.link_state = 1;
255 changes.link_state = link_up;
256 log_debug (dev, "link state changed to %s",
257 link_up ? "up" : "down");
/* event speed is in Mbps (per log message); port->speed is compared
   and stored as speed * 1000 — presumably Kbps, confirm with framework */
260 if (port->speed != speed * 1000)
262 changes.change.link_speed = 1;
263 changes.link_speed = speed * 1000;
264 log_debug (dev, "link speed changed to %u Mbps", speed);
267 if (changes.change.any)
268 vnet_dev_port_state_change (vm, port, changes);
/* keep the events vector allocated; just reset its length */
272 vec_reset_length (ad->events);
/* MSI-X handler for the adminq vector: re-arm the interrupt via
   DYN_CTL0 and defer the actual ARQ servicing to process context by
   scheduling iavf_aq_poll (no waiting in interrupt context). */
277 iavf_adminq_msix_handler (vlib_main_t *vm, vnet_dev_t *dev, u16 line)
279 iavf_device_t *ad = vnet_dev_get_data (dev);
280 iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_enable.as_u32);
281 log_debug (dev, "MSI-X interrupt %u received", line);
282 vnet_dev_process_call_op_no_wait (vm, dev, iavf_aq_poll);
/* Legacy INTx handler: identical handling to MSI-X vector 0. */
286 iavf_adminq_intx_handler (vlib_main_t *vm, vnet_dev_t *dev)
288 iavf_adminq_msix_handler (vm, dev, 0);
/* Program the admin queue hardware: disable both rings, then set
   head/tail/base/length for the ATQ and ARQ, pre-arm every ARQ slot
   with a receive buffer, publish the ARQ tail, and reset the software
   ring cursors. Marks the adminq active on completion.
   NOTE(review): local declarations (len, pa) are elided in this view. */
292 iavf_aq_init (vlib_main_t *vm, vnet_dev_t *dev)
294 iavf_device_t *ad = vnet_dev_get_data (dev);
298 /* disable both tx and rx adminq queue */
299 iavf_reg_write (ad, IAVF_ATQLEN, 0);
300 iavf_reg_write (ad, IAVF_ARQLEN, 0);
/* admin send queue (ATQ) */
302 len = IIAVF_AQ_ATQ_LEN;
303 pa = vnet_dev_get_dma_addr (vm, dev, &ad->aq_mem->atq);
304 iavf_reg_write (ad, IAVF_ATQT, 0); /* Tail */
305 iavf_reg_write (ad, IAVF_ATQH, 0); /* Head */
306 iavf_reg_write (ad, IAVF_ATQBAL, (u32) pa); /* Base Address Low */
307 iavf_reg_write (ad, IAVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
308 iavf_reg_write (ad, IAVF_ATQLEN, len | (1ULL << 31)); /* len & ena */
/* admin receive queue (ARQ) */
310 len = IIAVF_AQ_ARQ_LEN;
311 pa = vnet_dev_get_dma_addr (vm, dev, ad->aq_mem->arq);
312 iavf_reg_write (ad, IAVF_ARQT, 0); /* Tail */
313 iavf_reg_write (ad, IAVF_ARQH, 0); /* Head */
314 iavf_reg_write (ad, IAVF_ARQBAL, (u32) pa); /* Base Address Low */
315 iavf_reg_write (ad, IAVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
316 iavf_reg_write (ad, IAVF_ARQLEN, len | (1ULL << 31)); /* len & ena */
/* hand all ARQ descriptors to hardware, then publish the tail */
318 for (int i = 0; i < len; i++)
319 iavf_aq_arq_slot_init (vm, dev, i);
320 iavf_reg_write (ad, IAVF_ARQT, len - 1); /* Tail */
322 ad->atq_next_slot = 0;
323 ad->arq_next_slot = 0;
324 ad->adminq_active = 1;
/* Start adminq servicing: register the periodic process-context poll,
   then attach the interrupt path — MSI-X vector 0 when the device has
   MSI-X interrupts, otherwise legacy INTx — and unmask irq 0.
   NOTE(review): the else-branch framing before the INTx line is elided
   in this view. */
328 iavf_aq_poll_on (vlib_main_t *vm, vnet_dev_t *dev)
330 iavf_device_t *ad = vnet_dev_get_data (dev);
332 vnet_dev_poll_dev_add (vm, dev, IIAVF_AQ_POLL_INTERVAL, iavf_aq_poll);
334 if (vnet_dev_get_pci_n_msix_interrupts (dev) > 0)
336 vnet_dev_pci_msix_add_handler (vm, dev, iavf_adminq_msix_handler, 0, 1);
337 vnet_dev_pci_msix_enable (vm, dev, 0, 1);
340 vnet_dev_pci_intx_add_handler (vm, dev, iavf_adminq_intx_handler);
342 iavf_irq_0_enable (ad);
/* Stop adminq servicing — exact mirror of iavf_aq_poll_on: mask irq 0,
   remove the periodic poll, then tear down whichever interrupt path
   (MSI-X vector 0 or legacy INTx) was installed.
   NOTE(review): the else-branch framing before the INTx line is elided
   in this view. */
346 iavf_aq_poll_off (vlib_main_t *vm, vnet_dev_t *dev)
348 iavf_device_t *ad = vnet_dev_get_data (dev);
350 iavf_irq_0_disable (ad);
352 vnet_dev_poll_dev_remove (vm, dev, iavf_aq_poll);
354 if (vnet_dev_get_pci_n_msix_interrupts (dev) > 0)
356 vnet_dev_pci_msix_disable (vm, dev, 0, 1);
357 vnet_dev_pci_msix_remove_handler (vm, dev, 0, 1);
360 vnet_dev_pci_intx_remove_handler (vm, dev);
/* Enqueue one descriptor (with optional payload of len bytes) on the
   admin send queue. With a non-zero timeout, waits in process context
   for hardware completion, returning VNET_DEV_ERR_TIMEOUT on expiry or
   VNET_DEV_ERR_BUG when the device reports an error on the descriptor.
   NOTE(review): several lines are elided in this view (descriptor copy
   into the ring, the data/timeout branch framing, loop framing and the
   success return). */
364 iavf_aq_atq_enq (vlib_main_t *vm, vnet_dev_t *dev, iavf_aq_desc_t *desc,
365 const u8 *data, u16 len, f64 timeout)
367 iavf_device_t *ad = vnet_dev_get_data (dev);
368 iavf_aq_desc_t *d = ad->aq_mem->atq + ad->atq_next_slot;
369 u8 *buf = ad->aq_mem->atq_bufs[ad->atq_next_slot].data;
371 ASSERT (len <= IIAVF_AQ_BUF_SIZE);
/* attach the slot's DMA buffer and copy the payload into it */
377 u64 pa = vnet_dev_get_dma_addr (vm, dev, buf);
379 d->addr_hi = (u32) (pa >> 32);
380 d->addr_lo = (u32) pa;
383 d->flags.lb = len > IIAVF_AQ_LARGE_BUF; /* "large buffer" bit for payloads > 512B */
384 clib_memcpy_fast (buf, data, len);
387 log_debug (dev, "slot %u\n %U", ad->atq_next_slot, format_iavf_aq_desc, d);
/* advance the ring cursor and notify hardware via the tail register */
389 ad->atq_next_slot = (ad->atq_next_slot + 1) % IIAVF_AQ_ATQ_LEN;
390 iavf_reg_write (ad, IAVF_ATQT, ad->atq_next_slot);
/* completion wait: re-check flags with acquire semantics, suspending
   the process between polls (62 checks spread across the timeout) */
395 f64 suspend_time = timeout / 62;
396 f64 t0 = vlib_time_now (vm);
397 iavf_aq_desc_flags_t flags;
401 flags.as_u16 = __atomic_load_n (&d->flags.as_u16, __ATOMIC_ACQUIRE);
/* NOTE(review): the error-flag test guarding this path is elided */
405 log_err (dev, "adminq enqueue error [opcode 0x%x, retval %d]",
406 d->opcode, d->retval);
407 return VNET_DEV_ERR_BUG;
/* done when both "descriptor done" and "completed" are set */
410 if (flags.dd && flags.cmp)
413 if (vlib_time_now (vm) - t0 > timeout)
415 log_err (dev, "adminq enqueue timeout [opcode 0x%x]", d->opcode);
416 return VNET_DEV_ERR_TIMEOUT;
419 vlib_process_suspend (vm, suspend_time);
/* Orderly adminq shutdown: if the queue is active, send a
   QUEUE_SHUTDOWN descriptor marked driver_unloading (fire-and-forget:
   no payload, zero timeout) and mark the adminq inactive.
   NOTE(review): the descriptor declaration framing is elided here. */
428 iavf_aq_deinit (vlib_main_t *vm, vnet_dev_t *dev)
430 iavf_device_t *ad = vnet_dev_get_data (dev);
431 if (ad->adminq_active)
434 .opcode = IIAVF_AQ_DESC_OP_QUEUE_SHUTDOWN,
435 .driver_unloading = 1,
436 .flags = { .si = 1 },
438 log_debug (dev, "adminq queue shutdown");
439 iavf_aq_atq_enq (vm, dev, &d, 0, 0, 0);
440 ad->adminq_active = 0;
/* Acquire the next ARQ descriptor once hardware has completed it.
   Timeout 0 makes this a non-blocking check; a positive timeout
   suspends the calling process between polls. On success *dp/*bp are
   set to the current slot's descriptor and data buffer; the caller
   must release the slot with iavf_aq_arq_next_rel.
   NOTE(review): the return statements and some branch framing are
   elided in this view. */
445 iavf_aq_arq_next_acq (vlib_main_t *vm, vnet_dev_t *dev, iavf_aq_desc_t **dp,
446 u8 **bp, f64 timeout)
448 iavf_device_t *ad = vnet_dev_get_data (dev);
449 iavf_aq_desc_t *d = ad->aq_mem->arq + ad->arq_next_slot;
/* 62 completion checks spread across the requested timeout */
453 f64 suspend_time = timeout / 62;
454 f64 t0 = vlib_time_now (vm);
456 while (!iavf_aq_desc_is_done (d))
458 if (vlib_time_now (vm) - t0 > timeout)
461 vlib_process_suspend (vm, suspend_time);
/* zero-timeout path: single non-blocking completion check */
466 else if (!iavf_aq_desc_is_done (d))
469 log_debug (dev, "arq desc acquired in slot %u\n %U", ad->arq_next_slot,
470 format_iavf_aq_desc, d);
472 *bp = ad->aq_mem->arq_bufs[ad->arq_next_slot].data;
/* Return the current ARQ slot to hardware after the caller is done
   with it: re-initialize its descriptor (fresh buffer address),
   publish it via the tail register, then advance the software cursor.
   Must only be called for a slot whose descriptor is completed. */
477 iavf_aq_arq_next_rel (vlib_main_t *vm, vnet_dev_t *dev)
479 iavf_device_t *ad = vnet_dev_get_data (dev);
480 ASSERT (iavf_aq_desc_is_done (ad->aq_mem->arq + ad->arq_next_slot));
481 iavf_aq_arq_slot_init (vm, dev, ad->arq_next_slot);
482 iavf_reg_write (ad, IAVF_ARQT, ad->arq_next_slot);
484 ad->arq_next_slot = (ad->arq_next_slot + 1) % IIAVF_AQ_ARQ_LEN;