1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2023 Cisco Systems, Inc.
7 #include <vnet/dev/dev.h>
8 #include <vnet/dev/pci.h>
9 #include <vnet/dev/counters.h>
10 #include <dev_iavf/iavf.h>
11 #include <dev_iavf/virtchnl.h>
12 #include <vnet/ethernet/ethernet.h>
14 #define IIAVF_AQ_LARGE_BUF 512
15 #define IIAVF_AQ_ATQ_LEN 4
16 #define IIAVF_AQ_ARQ_LEN 16
/* Log class for this file's log_debug()/log_err() calls ("adminq" subclass). */
VLIB_REGISTER_LOG_CLASS (iavf_log, static) = {
  .subclass_name = "adminq",
/* DMA-shared backing store for the admin queues: the send (ATQ) and receive
 * (ARQ) descriptor rings plus one fixed-size data buffer per ring slot.
 * Allocated as a single device DMA region by iavf_aq_alloc(); referenced
 * elsewhere via the iavf_adminq_dma_mem_t typedef. */
struct iavf_adminq_dma_mem
  iavf_aq_desc_t atq[IIAVF_AQ_ATQ_LEN]; /* send-queue descriptor ring */
  iavf_aq_desc_t arq[IIAVF_AQ_ARQ_LEN]; /* receive-queue descriptor ring */
    u8 data[IIAVF_AQ_BUF_SIZE];
  } atq_bufs[IIAVF_AQ_ATQ_LEN]; /* one command payload buffer per ATQ slot */
    u8 data[IIAVF_AQ_BUF_SIZE];
  } arq_bufs[IIAVF_AQ_ARQ_LEN]; /* one message payload buffer per ARQ slot */
/* Check whether the device has completed this descriptor.  The acquire
 * load of the 16-bit flags word pairs with the device's DMA write of the
 * completion flags, so any descriptor fields the caller reads afterwards
 * are observed post-completion.  (Presumably returns the DD bit — the
 * return statement is outside this view; confirm against the full file.) */
static_always_inline int
iavf_aq_desc_is_done (iavf_aq_desc_t *d)
  iavf_aq_desc_flags_t flags;
  flags.as_u16 = __atomic_load_n (&d->flags.as_u16, __ATOMIC_ACQUIRE);
/* format() callback: render a descriptor flags word as a comma-separated
 * list of upper-cased flag names, expanded from foreach_iavf_aq_desc_flag.
 * (No comments interleaved below: the interior lines are continuations of a
 * macro definition.) */
format_iavf_aq_desc_flags (u8 *s, va_list *args)
  iavf_aq_desc_flags_t f = va_arg (*args, iavf_aq_desc_flags_t);
      char str[] = #v, *sp = str; \
	vec_add1 (s, (u8) toupper (sp++[0])); \
  foreach_iavf_aq_desc_flag
/* format() callback: print an admin queue descriptor return value as its
 * symbolic name, or "UNKNOWN(n)" when the value is out of range or has no
 * entry in the generated name table. */
format_iavf_aq_desc_retval (u8 *s, va_list *args)
  iavf_aq_desc_retval_t rv = va_arg (*args, u32);
  /* sparse value -> name table generated from foreach_iavf_aq_desc_retval */
#define _(a, b) [a] = #b,
  foreach_iavf_aq_desc_retval
  if (rv >= ARRAY_LEN (retvals) || retvals[rv] == 0)
    return format (s, "UNKNOWN(%d)", rv);
  return format (s, "%s", retvals[rv]);
/* format() callback: multi-line pretty-printer for one admin queue
 * descriptor.  For virtchnl send/receive opcodes the descriptor carries a
 * virtchnl opcode/status pair and a buffer DMA address split across
 * param2 (high) / param3 (low); those are decoded symbolically.  Any other
 * opcode gets a raw dump of the cookie and parameter words. */
format_iavf_aq_desc (u8 *s, va_list *args)
  iavf_aq_desc_t *d = va_arg (*args, iavf_aq_desc_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "opcode 0x%04x datalen %u retval %U (%u) flags %U", d->opcode,
	      d->datalen, format_iavf_aq_desc_retval, d->retval, d->retval,
	      format_iavf_aq_desc_flags, d->flags);

  if (d->opcode == IIAVF_AQ_DESC_OP_SEND_TO_PF ||
      d->opcode == IIAVF_AQ_DESC_OP_MESSAGE_FROM_PF)
      /* virtchnl message: decode virtchnl opcode and status names */
      format (s, "\n%Uv_opcode %U (%u) v_retval %U (%d) buf_dma_addr 0x%lx",
	      format_white_space, indent, format_virtchnl_op_name,
	      d->v_opcode, d->v_opcode, format_virtchnl_status, d->v_retval,
	      d->v_retval, (uword) d->param2 << 32 | d->param3);
      /* generic descriptor: raw cookie and parameter words */
      s, "\n%Ucookie_hi 0x%x cookie_lo 0x%x params %08x %08x %08x %08x",
      format_white_space, indent, d->cookie_hi, d->cookie_lo, d->param0,
      d->param1, d->param2, d->param3);
/* Allocate the admin queue DMA region (descriptor rings + slot buffers)
 * for this device and store it in ad->aq_mem.  Returns the vnet_dev
 * error code from the DMA allocator. */
iavf_aq_alloc (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  return vnet_dev_dma_mem_alloc (vm, dev, sizeof (iavf_adminq_dma_mem_t), 0,
				 (void **) &ad->aq_mem);
/* Release the admin queue DMA region allocated by iavf_aq_alloc(). */
iavf_aq_free (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  vnet_dev_dma_mem_free (vm, dev, ad->aq_mem);
/* (Re)arm one receive-queue (ARQ) descriptor: point it at the slot's DMA
 * buffer so the device can write the next PF message into it. */
iavf_aq_arq_slot_init (vlib_main_t *vm, vnet_dev_t *dev, u16 slot)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  u64 pa = vnet_dev_get_dma_addr (vm, dev, ad->aq_mem->arq_bufs + slot);
  ad->aq_mem->arq[slot] = (iavf_aq_desc_t){
    /* LB flag is set when the attached buffer exceeds the 512-byte
     * "large buffer" threshold (IIAVF_AQ_LARGE_BUF) */
    .flags.lb = IIAVF_AQ_BUF_SIZE > IIAVF_AQ_LARGE_BUF,
    .datalen = sizeof (ad->aq_mem->arq_bufs[0].data),
    .addr_hi = (u32) (pa >> 32),
/* Admin queue service routine, run in the vnet_dev process context
 * (scheduled periodically by iavf_aq_poll_on() and kicked by the interrupt
 * handlers below).  Drains completed ARQ descriptors into ad->events, then
 * processes the collected virtchnl PF events — LINK_CHANGE events are
 * translated into vnet_dev port state changes on port 0. */
iavf_aq_poll (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);

  /* drain every already-completed receive descriptor; timeout 0 = no wait */
  while (iavf_aq_arq_next_acq (vm, dev, &d, &b, 0))
      log_debug (dev, "poll[%u] flags %x %U op %u v_op %u", ad->arq_next_slot,
		 d->flags.as_u16, format_iavf_aq_desc_flags, d->flags,
		 d->opcode, d->v_opcode);
      /* a PF event must arrive as an attached buffer holding exactly one
       * virtchnl_pf_event_t */
      if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
	  ((d->flags.buf) == 0))
	  log_err (dev, "event message error");
      vec_add1 (ad->events, *(virtchnl_pf_event_t *) b);
      iavf_aq_arq_next_rel (vm, dev);

  if (vec_len (ad->events))
      virtchnl_pf_event_t *e;
      /* event-code -> name table generated from foreach_virtchnl_event_code */
      char *virtchnl_event_names[] = {
#define _(v, n) [v] = #n,
	foreach_virtchnl_event_code
      vec_foreach (e, ad->events)
	  log_debug (dev, "event %s (%u) sev %d",
		     virtchnl_event_names[e->event], e->event, e->severity);
	  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
	      vnet_dev_port_state_changes_t changes = {};
	      vnet_dev_port_t *port = vnet_dev_get_port_by_id (dev, 0);
	      iavf_port_t *ap = vnet_dev_get_port_data (port);
	      if (ap->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		  /* advanced link speed capability: speed is reported
		   * directly (in Mbps, per virtchnl advanced link event) */
		  link_up = e->event_data.link_event_adv.link_status;
		  speed = e->event_data.link_event_adv.link_speed;
		  /* legacy link speed: a one-hot enum decoded through this
		   * table; index order mirrors the virtchnl link-speed bit
		   * positions — TODO(review): confirm against virtchnl.h */
		  const u32 speed_table[8] = { 100, 1000, 10000, 40000,
					       20000, 25000, 2500, 5000 };
		  link_up = e->event_data.link_event.link_status;
		  speed = e->event_data.link_event.link_speed;
		  if (count_set_bits (speed) == 1 && speed &&
		    speed = speed_table[get_lowest_set_bit_index (speed)];
		      "unsupported link speed value "
	      log_debug (dev, "LINK_CHANGE speed %u state %u", speed,
	      if (port->link_up != link_up)
		  changes.change.link_state = 1;
		  changes.link_state = link_up;
		  log_debug (dev, "link state changed to %s",
			     link_up ? "up" : "down");
	      /* speed here is in Mbps; the x1000 suggests port->speed is
	       * kept in Kbps — NOTE(review): confirm vnet_dev convention */
	      if (port->speed != speed * 1000)
		  changes.change.link_speed = 1;
		  changes.link_speed = speed * 1000;
		  log_debug (dev, "link speed changed to %u Mbps", speed);
	      if (changes.change.any)
		vnet_dev_port_state_change (vm, port, changes);
  /* keep the vector allocated for the next poll, just empty it */
  vec_reset_length (ad->events);
/* Mask — and when `enable` is set, re-enable — device interrupt 0, the
 * vector that carries admin queue events.  The first register pair always
 * masks the interrupt (icr0_ena = 0, DYN_CTL0 with "no ITR update"); the
 * second pair re-arms the admin-queue cause with an ITR interval. */
iavf_irq_0_set_state (iavf_device_t *ad, int enable)
  u32 dyn_ctl0 = 0, icr0_ena = 0;

  dyn_ctl0 |= (3 << 3); /* [4:3] = 11b = No ITR update */

  /* disable path: clear all cause enables, write control register */
  iavf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  iavf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);

  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */

  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
  dyn_ctl0 |= (2 << 3); /* [4:3] ITR Index = 2 (10b).  NOTE(review): the
			 * previous comment claimed "11b = No ITR update",
			 * but 2 << 3 encodes 10b (11b would be 3 << 3, as
			 * in the disable path above) — confirm the intended
			 * ITR index against the AVF spec */
  dyn_ctl0 |= ((IAVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */

  iavf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  iavf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
/* MSI-X vector 0 handler: defer all real work to the device process by
 * scheduling a non-blocking call of iavf_aq_poll().  `line` is unused. */
iavf_adminq_msix_handler (vlib_main_t *vm, vnet_dev_t *dev, u16 line)
  log_debug (dev, "MSI-X interrupt 0 received");
  vnet_dev_process_call_op_no_wait (vm, dev, iavf_aq_poll);
/* Legacy INTx handler: identical action to MSI-X vector 0. */
iavf_adminq_intx_handler (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_adminq_msix_handler (vm, dev, 0);
/* Bring up the hardware admin queues: disable both rings, program base
 * address / length / enable for the send (ATQ) and receive (ARQ) rings,
 * pre-post every ARQ buffer to the device, and reset software ring state. */
iavf_aq_init (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);

  /* disable both tx and rx adminq queue */
  iavf_reg_write (ad, IAVF_ATQLEN, 0);
  iavf_reg_write (ad, IAVF_ARQLEN, 0);

  /* send queue (ATQ) */
  len = IIAVF_AQ_ATQ_LEN;
  pa = vnet_dev_get_dma_addr (vm, dev, &ad->aq_mem->atq);
  iavf_reg_write (ad, IAVF_ATQT, 0);		      /* Tail */
  iavf_reg_write (ad, IAVF_ATQH, 0);		      /* Head */
  iavf_reg_write (ad, IAVF_ATQBAL, (u32) pa);	      /* Base Address Low */
  iavf_reg_write (ad, IAVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
  iavf_reg_write (ad, IAVF_ATQLEN, len | (1ULL << 31)); /* len & ena */

  /* receive queue (ARQ) */
  len = IIAVF_AQ_ARQ_LEN;
  pa = vnet_dev_get_dma_addr (vm, dev, ad->aq_mem->arq);
  iavf_reg_write (ad, IAVF_ARQT, 0);		      /* Tail */
  iavf_reg_write (ad, IAVF_ARQH, 0);		      /* Head */
  iavf_reg_write (ad, IAVF_ARQBAL, (u32) pa);	      /* Base Address Low */
  iavf_reg_write (ad, IAVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
  iavf_reg_write (ad, IAVF_ARQLEN, len | (1ULL << 31)); /* len & ena */

  /* arm every receive slot, then point the tail at the last valid slot so
   * the device owns the whole ring */
  for (int i = 0; i < len; i++)
    iavf_aq_arq_slot_init (vm, dev, i);
  iavf_reg_write (ad, IAVF_ARQT, len - 1); /* Tail */

  ad->atq_next_slot = 0;
  ad->arq_next_slot = 0;
  ad->adminq_active = 1;
/* Start admin queue servicing: register a periodic poll of iavf_aq_poll()
 * and attach an interrupt path — MSI-X vector 0 when the device exposes
 * MSI-X, legacy INTx otherwise — then unmask interrupt 0. */
iavf_aq_poll_on (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);

  vnet_dev_poll_dev_add (vm, dev, IIAVF_AQ_POLL_INTERVAL, iavf_aq_poll);

  if (vnet_dev_get_pci_n_msix_interrupts (dev) > 0)
      vnet_dev_pci_msix_add_handler (vm, dev, iavf_adminq_msix_handler, 0, 1);
      vnet_dev_pci_msix_enable (vm, dev, 0, 1);
      /* no MSI-X available: fall back to legacy INTx */
      vnet_dev_pci_intx_add_handler (vm, dev, iavf_adminq_intx_handler);

  iavf_irq_0_set_state (ad, 1);
/* Stop admin queue servicing: mask interrupt 0, remove the periodic poll,
 * and detach whichever interrupt path (MSI-X vector 0 or INTx) was
 * installed by iavf_aq_poll_on(). */
iavf_aq_poll_off (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);

  iavf_irq_0_set_state (ad, 0);

  vnet_dev_poll_dev_remove (vm, dev, iavf_aq_poll);

  if (vnet_dev_get_pci_n_msix_interrupts (dev) > 0)
      vnet_dev_pci_msix_disable (vm, dev, 0, 1);
      vnet_dev_pci_msix_remove_handler (vm, dev, 0, 1);
      /* INTx path was used instead of MSI-X */
      vnet_dev_pci_intx_remove_handler (vm, dev);
/* Enqueue one command on the admin send queue (ATQ).
 *
 * `desc` is placed in the next ATQ slot; when a payload is supplied
 * (`data`/`len`, len <= IIAVF_AQ_BUF_SIZE) it is copied into the slot's
 * DMA buffer and attached to the descriptor.  With a non-zero `timeout`
 * (seconds) the calling process suspends in small increments until the
 * device completes the descriptor; returns VNET_DEV_ERR_BUG on a
 * device-reported error and VNET_DEV_ERR_TIMEOUT on expiry.  A zero
 * timeout appears to submit without waiting — confirm in the full file. */
iavf_aq_atq_enq (vlib_main_t *vm, vnet_dev_t *dev, iavf_aq_desc_t *desc,
		 const u8 *data, u16 len, f64 timeout)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  iavf_aq_desc_t *d = ad->aq_mem->atq + ad->atq_next_slot;
  u8 *buf = ad->aq_mem->atq_bufs[ad->atq_next_slot].data;

  ASSERT (len <= IIAVF_AQ_BUF_SIZE);

      /* attach the payload buffer to the descriptor */
      u64 pa = vnet_dev_get_dma_addr (vm, dev, buf);
      d->addr_hi = (u32) (pa >> 32);
      d->addr_lo = (u32) pa;
      /* LB marks a "large" (> 512 byte) buffer */
      d->flags.lb = len > IIAVF_AQ_LARGE_BUF;
      clib_memcpy_fast (buf, data, len);

  log_debug (dev, "slot %u\n %U", ad->atq_next_slot, format_iavf_aq_desc, d);

  /* advance the software slot, then bump the hardware tail to submit */
  ad->atq_next_slot = (ad->atq_next_slot + 1) % IIAVF_AQ_ATQ_LEN;
  iavf_reg_write (ad, IAVF_ATQT, ad->atq_next_slot);

      /* wait for completion, yielding the process between polls */
      f64 suspend_time = timeout / 62;
      f64 t0 = vlib_time_now (vm);
      iavf_aq_desc_flags_t flags;

	  /* acquire load pairs with the device's completion DMA write */
	  flags.as_u16 = __atomic_load_n (&d->flags.as_u16, __ATOMIC_ACQUIRE);

	      log_err (dev, "adminq enqueue error [opcode 0x%x, retval %d]",
		       d->opcode, d->retval);
	      return VNET_DEV_ERR_BUG;

	  /* done + completed: success */
	  if (flags.dd && flags.cmp)

	  if (vlib_time_now (vm) - t0 > timeout)
	      log_err (dev, "adminq enqueue timeout [opcode 0x%x]", d->opcode);
	      return VNET_DEV_ERR_TIMEOUT;

	  vlib_process_suspend (vm, suspend_time);
/* Tear down the admin queue: if it is still active, send a best-effort
 * QUEUE_SHUTDOWN command (driver_unloading set; zero timeout = don't wait
 * for completion; .si flag semantics per the AVF descriptor spec —
 * presumably suppress/solicit interrupt, confirm there) and mark the
 * queue inactive. */
iavf_aq_deinit (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  if (ad->adminq_active)
	.opcode = IIAVF_AQ_DESC_OP_QUEUE_SHUTDOWN,
	.driver_unloading = 1,
	.flags = { .si = 1 },
      log_debug (dev, "adminq queue shutdown");
      iavf_aq_atq_enq (vm, dev, &d, 0, 0, 0);
      ad->adminq_active = 0;
/* Acquire the next completed receive-queue (ARQ) descriptor.
 *
 * With a non-zero `timeout` (seconds) the calling process suspends in
 * small increments until the slot completes; with timeout 0 only the
 * current slot is checked.  On success *dp and *bp are pointed at the
 * descriptor and its data buffer — both remain valid only until the
 * caller releases the slot with iavf_aq_arq_next_rel(). */
iavf_aq_arq_next_acq (vlib_main_t *vm, vnet_dev_t *dev, iavf_aq_desc_t **dp,
		      u8 **bp, f64 timeout)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  iavf_aq_desc_t *d = ad->aq_mem->arq + ad->arq_next_slot;

      /* waiting path: poll the DD flag, yielding between checks */
      f64 suspend_time = timeout / 62;
      f64 t0 = vlib_time_now (vm);

      while (!iavf_aq_desc_is_done (d))
	  if (vlib_time_now (vm) - t0 > timeout)
	  vlib_process_suspend (vm, suspend_time);
  /* non-waiting path: give up immediately if the slot is not done */
  else if (!iavf_aq_desc_is_done (d))

  log_debug (dev, "arq desc acquired in slot %u\n %U", ad->arq_next_slot,
	     format_iavf_aq_desc, d);
  *bp = ad->aq_mem->arq_bufs[ad->arq_next_slot].data;
/* Release the ARQ slot obtained via iavf_aq_arq_next_acq(): re-arm the
 * descriptor with a fresh buffer, hand the slot back to the device by
 * writing the ring tail, then advance the software slot index. */
iavf_aq_arq_next_rel (vlib_main_t *vm, vnet_dev_t *dev)
  iavf_device_t *ad = vnet_dev_get_data (dev);
  /* only a completed slot may be recycled */
  ASSERT (iavf_aq_desc_is_done (ad->aq_mem->arq + ad->arq_next_slot));
  iavf_aq_arq_slot_init (vm, dev, ad->arq_next_slot);
  iavf_reg_write (ad, IAVF_ARQT, ad->arq_next_slot);
  ad->arq_next_slot = (ad->arq_next_slot + 1) % IIAVF_AQ_ARQ_LEN;