 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/interface/tx_queue_funcs.h>
#define AVF_MBOX_LEN 64
#define AVF_MBOX_BUF_SZ 4096
#define AVF_RXQ_SZ 512
#define AVF_TXQ_SZ 512
#define AVF_ITR_INT 250

#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_AVF 0x1889
#define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
#define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
VLIB_REGISTER_LOG_CLASS (avf_log) = {
VLIB_REGISTER_LOG_CLASS (avf_stats_log) = {
  .subclass_name = "stats",
void avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier);
static pci_device_id_t avf_pci_device_ids[] = {
  { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_AVF },
  { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_X710_VF },
  { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_X722_VF },
static const char *virtchnl_event_names[] = {
#define _(v, n) [v] = #n,
  foreach_virtchnl_event_code
  AVF_IRQ_STATE_DISABLED,
  AVF_IRQ_STATE_ENABLED,
  AVF_IRQ_STATE_WB_ON_ITR,
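
/* Vector 0 is dedicated to mailbox (admin queue) events; rx queue vectors
 * are programmed separately by avf_irq_n_set_state () below. */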
avf_irq_0_set_state (avf_device_t * ad, avf_irq_state_t state)
  u32 dyn_ctl0 = 0, icr0_ena = 0;
  dyn_ctl0 |= (3 << 3);		/* 11b = No ITR update */
  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
  if (state == AVF_IRQ_STATE_DISABLED)
  icr0_ena |= (1 << 30);	/* [30] Admin Queue Enable */
  dyn_ctl0 |= (1 << 0);		/* [0] Interrupt Enable */
  dyn_ctl0 |= (1 << 1);		/* [1] Clear PBA */
  dyn_ctl0 |= (2 << 3);		/* [4:3] ITR Index, 10b = ITR2 */
  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */
  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
avf_irq_n_set_state (avf_device_t * ad, u8 line, avf_irq_state_t state)
  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
  if (state == AVF_IRQ_STATE_DISABLED)
  dyn_ctln |= (1 << 1);		/* [1] Clear PBA */
  if (state == AVF_IRQ_STATE_WB_ON_ITR)
      /* minimal ITR interval, use ITR1 */
      dyn_ctln |= (1 << 3);	/* [4:3] ITR Index */
      dyn_ctln |= ((32 / 2) << 5);	/* [16:5] ITR Interval in 2us steps */
      dyn_ctln |= (1 << 30);	/* [30] Writeback on ITR */
      /* configured ITR interval, use ITR0 */
      dyn_ctln |= (1 << 0);	/* [0] Interrupt Enable */
      dyn_ctln |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */
  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
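
/* Copy one descriptor into the ATQ (VF -> PF mailbox) ring, attach the
 * optional indirect data buffer, bump the tail register and suspend the
 * calling process until the PF sets the DD/CMP completion flags or the
 * wait times out. */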
avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
  clib_error_t *err = 0;
  avf_aq_desc_t *d, dc;
  f64 t0, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
  d = &ad->atq[ad->atq_next_slot];
  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
      pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
      d->addr_hi = (u32) (pa >> 32);
      d->addr_lo = (u32) pa;
      clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
      d->flags |= AVF_AQ_F_BUF;
  if (ad->flags & AVF_DEVICE_F_ELOG)
    clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
  CLIB_MEMORY_BARRIER ();
  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
  t0 = vlib_time_now (vm);
  vlib_process_suspend (vm, suspend_time);
  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
      f64 t = vlib_time_now (vm) - t0;
      if (t > AVF_AQ_ENQ_MAX_WAIT_TIME)
          avf_log_err (ad, "aq_desc_enq failed (timeout %.3fs)", t);
          err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
  if (d->flags & AVF_AQ_F_ERR)
    return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
			      "%d]", d->opcode, d->retval);
  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
		  "datalen %d retval %d",
	.format_args = "i4i2i2i2i2i2",
      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->s_flags = dc.flags;
      ed->r_flags = d->flags;
      ed->opcode = dc.opcode;
      ed->datalen = dc.datalen;
      ed->retval = d->retval;
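
/* Rx control registers cannot be written directly from the VF; AdminQ
 * opcode 0x207 asks the PF to perform the register write on the VF's
 * behalf. */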
avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
  avf_aq_desc_t d = { .opcode = 0x207, .param1 = reg, .param3 = val };
  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
	.format_args = "i4i4i4",
      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
      ed->dev_instance = ad->dev_instance;
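
/* Allocate and DMA-map the rx descriptor ring for queue 'qid' and pre-fill
 * it with buffers from the numa-local pool. 8 slots are deliberately left
 * unfilled, presumably so the tail pointer never catches up with the head. */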
avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (ad->rxqs, qid);
  rxq->size = rxq_size;
  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
						   sizeof (avf_rx_desc_t),
						   2 * CLIB_CACHE_LINE_BYTES,
  rxq->buffer_pool_index =
    vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
    return vlib_physmem_last_error (vm);
  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
					 rxq->buffer_pool_index);
    return clib_error_return (0, "buffer allocation error");
  rxq->n_enqueued = n_alloc;
  avf_rx_desc_t *d = rxq->descs;
  for (i = 0; i < n_alloc; i++)
      vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
      if (ad->flags & AVF_DEVICE_F_VA_DMA)
	d->qword[0] = vlib_buffer_get_va (b);
	d->qword[0] = vlib_buffer_get_pa (vm, b);
  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
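
/* Allocate and DMA-map the tx descriptor ring for queue 'qid'. When there
 * are more threads than queue pairs, tx queues are shared and protected by
 * a per-queue spinlock. */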
avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
  u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
  if (qid >= ad->num_queue_pairs)
      qid = qid % ad->num_queue_pairs;
      txq = vec_elt_at_index (ad->txqs, qid);
      ASSERT (txq->lock == 0);
      clib_spinlock_init (&txq->lock);
      ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (ad->txqs, qid);
  txq->size = txq_size;
  /* Prepare placeholder buffer(s) to maintain a 1:1 relationship between
   * bufs and descs when a context descriptor is added to descs. In the
   * worst case every second descriptor is a context descriptor, and since
   * b->ref_count is a u8 we need one placeholder per block of 510
   * descriptors. */
  n = (txq->size / 510) + 1;
  vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
  if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
    return clib_error_return (0, "buffer allocation error");
  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
						   sizeof (avf_tx_desc_t),
						   2 * CLIB_CACHE_LINE_BYTES,
    return vlib_physmem_last_error (vm);
  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
  /* initialize ring of pending RS slots */
  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
} virtchnl_promisc_info_t;
avf_arq_slot_init (avf_device_t * ad, u16 slot)
  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
  clib_memset (d, 0, sizeof (avf_aq_desc_t));
  d->flags = AVF_AQ_F_BUF;
  d->datalen = AVF_MBOX_BUF_SZ;
  d->addr_hi = (u32) (pa >> 32);
  d->addr_lo = (u32) pa;
avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
    pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
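
/* Program base address, length and head/tail registers for both mailbox
 * rings; bit 31 of the *QLEN registers is the ring enable bit. */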
avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
  /* VF MailBox Transmit */
  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
  pa = avf_dma_addr (vm, ad, ad->atq);
  avf_reg_write (ad, AVF_ATQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ATQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ATQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32));	/* Base Address High */
  /* VF MailBox Receive */
  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
  for (i = 0; i < AVF_MBOX_LEN; i++)
    avf_arq_slot_init (ad, i);
  pa = avf_dma_addr (vm, ad, ad->arq);
  avf_reg_write (ad, AVF_ARQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ARQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ARQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32));	/* Base Address High */
  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1);	/* Tail */
  ad->atq_next_slot = 0;
  ad->arq_next_slot = 0;
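
/* Send a virtchnl message to the PF (AdminQ opcode 0x801) and poll the ARQ
 * for the matching reply. Asynchronous VIRTCHNL_OP_EVENT messages which
 * arrive in the meantime are queued on ad->events for the process node. */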
avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
		void *in, int in_len, void *out, int out_len)
  avf_aq_desc_t *d, dt = { .opcode = 0x801, .v_opcode = op };
  f64 t0, suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
  /* adminq operations should only be done from process node after device
  ASSERT ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0 ||
	  vlib_get_current_process_node_index (vm) == avf_process_node.index);
  /* suppress interrupt in the next adminq receive slot
     as we are going to wait for response
     we only need interrupts when event is received */
  d = &ad->arq[ad->arq_next_slot];
  d->flags |= AVF_AQ_F_SI;
  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
  t0 = vlib_time_now (vm);
  head = avf_get_u32 (ad->bar0, AVF_ARQH);
  if (ad->arq_next_slot == head)
      f64 t = vlib_time_now (vm) - t0;
      if (t > AVF_SEND_TO_PF_MAX_WAIT_TIME)
	  avf_log_err (ad, "send_to_pf failed (timeout %.3fs)", t);
	  return clib_error_return (0, "timeout");
      vlib_process_suspend (vm, suspend_time);
  d = &ad->arq[ad->arq_next_slot];
  if (d->v_opcode == VIRTCHNL_OP_EVENT)
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      virtchnl_pf_event_t *e;
      if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
	  ((d->flags & AVF_AQ_F_BUF) == 0))
	return clib_error_return (0, "event message error");
      vec_add2 (ad->events, e, 1);
      clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
      avf_arq_slot_init (ad, ad->arq_next_slot);
      t0 = vlib_time_now (vm);
      suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
  if (d->v_opcode != op)
      err = clib_error_return (0,
			       "unexpected message received [v_opcode = %u, "
			       "expected %u, v_retval %d]",
			       d->v_opcode, op, d->v_retval);
      err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
			       d->v_opcode, d->v_retval);
  if (out_len && d->flags & AVF_AQ_F_BUF)
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      clib_memcpy_fast (out, buf, out_len);
  avf_arq_slot_init (ad, ad->arq_next_slot);
  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
	.format_args = "i4t4i4i4",
	.n_enum_strings = VIRTCHNL_N_OPS,
#define _(v, n) [v] = #n,
      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->v_opcode_val = op;
      ed->v_retval = d->v_retval;
avf_op_version (vlib_main_t * vm, avf_device_t * ad,
		virtchnl_version_info_t * ver)
  clib_error_t *err = 0;
  virtchnl_version_info_t myver = {
    .major = VIRTCHNL_VERSION_MAJOR,
    .minor = VIRTCHNL_VERSION_MINOR,
  avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);
  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
			sizeof (virtchnl_version_info_t), ver,
			sizeof (virtchnl_version_info_t));
avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
			 virtchnl_vf_resource_t * res)
  clib_error_t *err = 0;
  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
		VIRTCHNL_VF_OFFLOAD_RX_POLLING |
		VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_OFFLOAD_FDIR_PF |
		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN_V2);
  avf_log_debug (ad, "get_vf_resources: bitmap 0x%x (%U)", bitmap,
		 format_avf_vf_cap_flags, bitmap);
  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
			sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
		 "get_vf_resources: num_vsis %u num_queue_pairs %u "
		 "max_vectors %u max_mtu %u vf_cap_flags 0x%x (%U) "
		 "rss_key_size %u rss_lut_size %u",
		 res->num_vsis, res->num_queue_pairs, res->max_vectors,
		 res->max_mtu, res->vf_cap_flags, format_avf_vf_cap_flags,
		 res->vf_cap_flags, res->rss_key_size, res->rss_lut_size);
  for (i = 0; i < res->num_vsis; i++)
      "get_vf_resources_vsi[%u]: vsi_id %u num_queue_pairs %u vsi_type %u "
      "qset_handle %u default_mac_addr %U",
      i, res->vsi_res[i].vsi_id, res->vsi_res[i].num_queue_pairs,
      res->vsi_res[i].vsi_type, res->vsi_res[i].qset_handle,
      format_ethernet_address, res->vsi_res[i].default_mac_addr);
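
/* Fill the RSS lookup table so flows are spread round-robin across all
 * configured rx queues. */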
avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
  virtchnl_rss_lut_t *rl;
  clib_memset (msg, 0, msg_len);
  rl = (virtchnl_rss_lut_t *) msg;
  rl->vsi_id = ad->vsi_id;
  rl->lut_entries = ad->rss_lut_size;
  for (i = 0; i < ad->rss_lut_size; i++)
    rl->lut[i] = i % ad->n_rx_queues;
  avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
		 rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
		 rl->lut, rl->lut_entries);
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
  virtchnl_rss_key_t *rk;
  clib_memset (msg, 0, msg_len);
  rk = (virtchnl_rss_key_t *) msg;
  rk->vsi_id = ad->vsi_id;
  rk->key_len = ad->rss_key_size;
  u32 seed = random_default_seed ();
  for (i = 0; i < ad->rss_key_size; i++)
    rk->key[i] = (u8) random_u32 (&seed);
  avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
		 rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
  avf_log_debug (ad, "disable_vlan_stripping");
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
avf_op_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad,
  virtchnl_promisc_info_t pi = { 0 };
  pi.vsi_id = ad->vsi_id;
    pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
  avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
		 pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
		 pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
			 sizeof (virtchnl_promisc_info_t), 0, 0);
avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
    sizeof (virtchnl_queue_pair_info_t);
  virtchnl_vsi_queue_config_info_t *ci;
  clib_memset (msg, 0, msg_len);
  ci = (virtchnl_vsi_queue_config_info_t *) msg;
  ci->vsi_id = ad->vsi_id;
  ci->num_queue_pairs = n_qp;
  avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
		 ad->vsi_id, ci->num_queue_pairs);
  for (i = 0; i < n_qp; i++)
      virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
      virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
      rxq->vsi_id = ad->vsi_id;
      rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
      if (i < vec_len (ad->rxqs))
	  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
	  rxq->ring_len = q->size;
	  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
	  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
	  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
	  avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
			 "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
			 i, rxq->max_pkt_size, rxq->ring_len,
			 rxq->databuffer_size, rxq->dma_ring_addr);
      txq->vsi_id = ad->vsi_id;
      if (i < vec_len (ad->txqs))
	  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
	  txq->ring_len = q->size;
	  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
	  avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
			 "dma_ring_addr 0x%llx", i, txq->ring_len,
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
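
/* Map rx queues to MSI-X vectors. Vector 0 is reserved for the admin queue,
 * so rx vectors are numbered from 1; when there are fewer irqs than rx
 * queues, every vector is mapped to all rx queues. */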
avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_irq_map_info_t) +
    (ad->n_rx_irqs) * sizeof (virtchnl_vector_map_t);
  virtchnl_irq_map_info_t *imi;
  clib_memset (msg, 0, msg_len);
  imi = (virtchnl_irq_map_info_t *) msg;
  imi->num_vectors = ad->n_rx_irqs;
  for (int i = 0; i < ad->n_rx_irqs; i++)
      imi->vecmap[i].vector_id = i + 1;
      imi->vecmap[i].vsi_id = ad->vsi_id;
      if (ad->n_rx_irqs == ad->n_rx_queues)
	imi->vecmap[i].rxq_map = 1 << i;
	imi->vecmap[i].rxq_map = pow2_mask (ad->n_rx_queues);
751 avf_log_debug (ad, "config_irq_map[%u/%u]: vsi_id %u vector_id %u "
752 "rxq_map %u", i, ad->n_rx_irqs - 1, ad->vsi_id,
753 imi->vecmap[i].vector_id, imi->vecmap[i].rxq_map);
757 return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
762 avf_op_add_del_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count,
763 u8 * macs, int is_add)
766 sizeof (virtchnl_ether_addr_list_t) +
767 count * sizeof (virtchnl_ether_addr_t);
769 virtchnl_ether_addr_list_t *al;
772 clib_memset (msg, 0, msg_len);
773 al = (virtchnl_ether_addr_list_t *) msg;
774 al->vsi_id = ad->vsi_id;
775 al->num_elements = count;
777 avf_log_debug (ad, "add_del_eth_addr: vsi_id %u num_elements %u is_add %u",
778 ad->vsi_id, al->num_elements, is_add);
780 for (i = 0; i < count; i++)
782 clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
783 avf_log_debug (ad, "add_del_eth_addr[%u]: %U", i,
784 format_ethernet_address, &al->list[i].addr);
786 return avf_send_to_pf (vm, ad, is_add ? VIRTCHNL_OP_ADD_ETH_ADDR :
787 VIRTCHNL_OP_DEL_ETH_ADDR, msg, msg_len, 0, 0);
791 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
793 virtchnl_queue_select_t qs = { 0 };
795 qs.vsi_id = ad->vsi_id;
799 avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
800 ad->vsi_id, qs.rx_queues, qs.tx_queues);
806 avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
807 avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
812 return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
813 sizeof (virtchnl_queue_select_t), 0, 0);
817 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
818 virtchnl_eth_stats_t * es)
820 virtchnl_queue_select_t qs = { 0 };
822 qs.vsi_id = ad->vsi_id;
824 err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS, &qs,
825 sizeof (virtchnl_queue_select_t), es,
826 sizeof (virtchnl_eth_stats_t));
828 avf_stats_log_debug (ad, "get_stats: vsi_id %u\n %U", ad->vsi_id,
829 format_avf_eth_stats, es);
835 avf_op_get_offload_vlan_v2_caps (vlib_main_t *vm, avf_device_t *ad,
836 virtchnl_vlan_caps_t *vc)
840 err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 0, 0, vc,
841 sizeof (virtchnl_vlan_caps_t));
843 avf_log_debug (ad, "get_offload_vlan_v2_caps:\n%U%U", format_white_space, 16,
844 format_avf_vlan_caps, vc);
850 avf_op_disable_vlan_stripping_v2 (vlib_main_t *vm, avf_device_t *ad, u32 outer,
853 virtchnl_vlan_setting_t vs = {
854 .outer_ethertype_setting = outer,
855 .inner_ethertype_setting = inner,
856 .vport_id = ad->vsi_id,
859 avf_log_debug (ad, "disable_vlan_stripping_v2: outer: %U, inner %U",
860 format_avf_vlan_support, outer, format_avf_vlan_support,
863 return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, &vs,
864 sizeof (virtchnl_vlan_setting_t), 0, 0);
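
/* Trigger a VF reset through the mailbox, then poll AVFGEN_RSTAT until the
 * PF reports the reset as completed or the wait times out. */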
avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
  avf_aq_desc_t d = { 0 };
  f64 t0, t = 0, suspend_time = AVF_RESET_SUSPEND_TIME;
  avf_log_debug (ad, "reset");
  d.v_opcode = VIRTCHNL_OP_RESET_VF;
  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
  t0 = vlib_time_now (vm);
  vlib_process_suspend (vm, suspend_time);
  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
  if (rstat == 2 || rstat == 3)
      avf_log_debug (ad, "reset completed in %.3fs", t);
  t = vlib_time_now (vm) - t0;
  if (t > AVF_RESET_MAX_WAIT_TIME)
      avf_log_err (ad, "reset failed (timeout %.3fs)", t);
      return clib_error_return (0, "reset failed (timeout)");
avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
  virtchnl_vf_res_request_t res_req = { 0 };
  f64 t0, t, suspend_time = AVF_RESET_SUSPEND_TIME;
  res_req.num_queue_pairs = num_queue_pairs;
  avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);
  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
			  sizeof (virtchnl_vf_res_request_t), &res_req,
			  sizeof (virtchnl_vf_res_request_t));
   * if PF responds, the request failed
   * else PF initializes restart and avf_send_to_pf returns an error
    return clib_error_return (0, "requested more than %u queue pairs",
			      res_req.num_queue_pairs);
  t0 = vlib_time_now (vm);
  vlib_process_suspend (vm, suspend_time);
  t = vlib_time_now (vm) - t0;
  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
  if (t > AVF_RESET_MAX_WAIT_TIME)
      avf_log_err (ad, "request queues failed (timeout %.3f seconds)", t);
      return clib_error_return (0, "request queues failed (timeout)");
avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
		 avf_create_if_args_t * args)
  virtchnl_version_info_t ver = { 0 };
  virtchnl_vf_resource_t res = { 0 };
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  avf_adminq_init (vm, ad);
  if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
  /* we failed to get more queues, but still we want to proceed */
  clib_error_free (error);
  if ((error = avf_device_reset (vm, ad)))
  avf_adminq_init (vm, ad);
  if ((error = avf_op_version (vm, ad, &ver)))
  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
      ver.minor != VIRTCHNL_VERSION_MINOR)
    return clib_error_return (0, "incompatible protocol version "
			      "(remote %d.%d)", ver.major, ver.minor);
   * OP_GET_VF_RESOURCES
  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
    return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
  ad->vsi_id = res.vsi_res[0].vsi_id;
  ad->cap_flags = res.vf_cap_flags;
  ad->num_queue_pairs = res.num_queue_pairs;
  ad->max_vectors = res.max_vectors;
  ad->max_mtu = res.max_mtu;
  ad->rss_key_size = res.rss_key_size;
  ad->rss_lut_size = res.rss_lut_size;
  wb_on_itr = (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) != 0;
  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
   * Disable VLAN stripping
  if (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
      virtchnl_vlan_caps_t vc = {};
      u32 outer = VIRTCHNL_VLAN_UNSUPPORTED, inner = VIRTCHNL_VLAN_UNSUPPORTED;
      u32 mask = VIRTCHNL_VLAN_ETHERTYPE_8100;
      if ((error = avf_op_get_offload_vlan_v2_caps (vm, ad, &vc)))
      outer = vc.offloads.stripping_support.outer & mask;
      inner = vc.offloads.stripping_support.inner & mask;
      if ((outer || inner) &&
	  (error = avf_op_disable_vlan_stripping_v2 (vm, ad, outer, inner)))
  else if ((error = avf_op_disable_vlan_stripping (vm, ad)))
  if (args->rxq_num == 0)
  else if (args->rxq_num > ad->num_queue_pairs)
      args->rxq_num = ad->num_queue_pairs;
      avf_log_warn (ad, "Requested more rx queues than queue pairs "
		    "available. Using %u rx queues.", args->rxq_num);
  for (i = 0; i < args->rxq_num; i++)
    if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
  if (ad->max_vectors > ad->n_rx_queues)
      ad->flags |= AVF_DEVICE_F_RX_INT;
      ad->n_rx_irqs = args->rxq_num;
  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_lut (vm, ad)))
  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_key (vm, ad)))
  if ((error = avf_op_config_vsi_queues (vm, ad)))
  if ((error = avf_op_config_irq_map (vm, ad)))
  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);
  for (i = 0; i < ad->n_rx_irqs; i++)
    avf_irq_n_set_state (ad, i, wb_on_itr ? AVF_IRQ_STATE_WB_ON_ITR :
			 AVF_IRQ_STATE_ENABLED);
  if ((error = avf_op_add_del_eth_addr (vm, ad, 1, ad->hwaddr, 1 /* add */ )))
  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
				     pow2_mask (ad->n_tx_queues))))
  ad->flags |= AVF_DEVICE_F_INITIALIZED;
avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
  vnet_main_t *vnm = vnet_get_main ();
  virtchnl_pf_event_t *e;
  if (ad->flags & AVF_DEVICE_F_ERROR)
  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
  ASSERT (ad->error == 0);
  /* do not process device in reset state */
  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
  if (r != VIRTCHNL_VFR_VFACTIVE)
  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
      ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
      avf_log_err (ad, "error: %U", format_clib_error, ad->error);
  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
      ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
      avf_log_err (ad, "error: %U", format_clib_error, ad->error);
  avf_op_get_stats (vm, ad, &ad->eth_stats);
  vec_foreach (e, ad->events)
      avf_log_debug (ad, "event: %s (%u) sev %d",
		     virtchnl_event_names[e->event], e->event, e->severity);
      if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
	  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
	  if (ad->cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
	    link_up = e->event_data.link_event_adv.link_status;
	    link_up = e->event_data.link_event.link_status;
	  if (ad->cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
	    mbps = e->event_data.link_event_adv.link_speed;
	  if (speed == VIRTCHNL_LINK_SPEED_40GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_5GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
	  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
	  avf_log_debug (ad, "event_link_change: status %d speed %u mbps",
	  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
	      ad->flags |= AVF_DEVICE_F_LINK_UP;
	      flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
			VNET_HW_INTERFACE_FLAG_LINK_UP);
	      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
	      vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index,
	      ad->link_speed = mbps;
	  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
	      ad->flags &= ~AVF_DEVICE_F_LINK_UP;
	  if (ad->flags & AVF_DEVICE_F_ELOG)
	      ELOG_TYPE_DECLARE (el) =
		.format = "avf[%d] link change: link_status %d "
			  "link_speed %d mbps",
		.format_args = "i4i1i4",
	      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
	      ed->dev_instance = ad->dev_instance;
	      ed->link_status = link_up;
	      ed->link_speed = mbps;
      if (ad->flags & AVF_DEVICE_F_ELOG)
	  ELOG_TYPE_DECLARE (el) =
	    .format = "avf[%d] unknown event: event %d severity %d",
	    .format_args = "i4i4i1i1",
	  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
	  ed->dev_instance = ad->dev_instance;
	  ed->event = e->event;
	  ed->severity = e->severity;
  vec_reset_length (ad->events);
  ad->flags |= AVF_DEVICE_F_ERROR;
  ASSERT (ad->error != 0);
  vlib_log_err (avf_log.class, "%U", format_clib_error, ad->error);
avf_op_program_flow (vlib_main_t *vm, avf_device_t *ad, int is_create,
		     u8 *rule, u32 rule_len, u8 *program_status,
  avf_log_debug (ad, "avf_op_program_flow: vsi_id %u is_create %u", ad->vsi_id,
  return avf_send_to_pf (vm, ad,
			 is_create ? VIRTCHNL_OP_ADD_FDIR_FILTER :
			 VIRTCHNL_OP_DEL_FDIR_FILTER,
			 rule, rule_len, program_status, status_len);
avf_process_handle_request (vlib_main_t * vm, avf_process_req_t * req)
  avf_device_t *ad = avf_get_device (req->dev_instance);
  if (req->type == AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR)
    req->error = avf_op_add_del_eth_addr (vm, ad, 1, req->eth_addr,
  else if (req->type == AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE)
    req->error = avf_op_config_promisc_mode (vm, ad, req->is_enable);
  else if (req->type == AVF_PROCESS_REQ_PROGRAM_FLOW)
      avf_op_program_flow (vm, ad, req->is_add, req->rule, req->rule_len,
			   req->program_status, req->status_len);
    clib_panic ("BUG: unknown avf process request type");
  if (req->calling_process_index != avf_process_node.index)
    vlib_process_signal_event (vm, req->calling_process_index, 0, 0);
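
/* Admin queue access is only safe from the avf process node, so requests
 * originating elsewhere are forwarded to it as process events and the
 * caller blocks until the process node signals completion. */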
static clib_error_t *
avf_process_request (vlib_main_t * vm, avf_process_req_t * req)
  uword *event_data = 0;
  req->calling_process_index = vlib_get_current_process_node_index (vm);
  if (req->calling_process_index != avf_process_node.index)
      vlib_process_signal_event_pointer (vm, avf_process_node.index,
					 AVF_PROCESS_EVENT_REQ, req);
      vlib_process_wait_for_event_or_clock (vm, 5.0);
      if (vlib_process_get_events (vm, &event_data) != 0)
	clib_panic ("avf process node failed to reply in 5 seconds");
      vec_free (event_data);
    avf_process_handle_request (vm, req);
avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
  avf_process_req_t req;
  vlib_main_t *vm = vlib_get_main ();
  avf_device_t *ad = avf_get_device (hw->dev_instance);
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      ad->flags &= ~AVF_DEVICE_F_PROMISC;
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      ad->flags |= AVF_DEVICE_F_PROMISC;
  req.is_enable = ((ad->flags & AVF_DEVICE_F_PROMISC) != 0);
  req.type = AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE;
  req.dev_instance = hw->dev_instance;
  if ((err = avf_process_request (vm, &req)))
      avf_log_err (ad, "error: %U", format_clib_error, err);
      clib_error_free (err);
avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
  avf_main_t *am = &avf_main;
  uword *event_data = 0, event_type;
  int enabled = 0, irq;
  f64 last_run_duration = 0;
  f64 last_periodic_time = 0;
  avf_device_t **dev_pointers = 0;
	vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
	vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      last_periodic_time = vlib_time_now (vm);
	case AVF_PROCESS_EVENT_START:
	case AVF_PROCESS_EVENT_DELETE_IF:
	  for (int i = 0; i < vec_len (event_data); i++)
	      avf_device_t *ad = avf_get_device (event_data[i]);
	      avf_delete_if (vm, ad, /* with_barrier */ 1);
	  if (pool_elts (am->devices) < 1)
	case AVF_PROCESS_EVENT_AQ_INT:
	case AVF_PROCESS_EVENT_REQ:
	  for (int i = 0; i < vec_len (event_data); i++)
	    avf_process_handle_request (vm, (void *) event_data[i]);
      vec_reset_length (event_data);
      /* create local list of device pointers as device pool may grow
      vec_reset_length (dev_pointers);
      pool_foreach_index (i, am->devices)
	  vec_add1 (dev_pointers, avf_get_device (i));
      vec_foreach_index (i, dev_pointers)
	  avf_process_one_device (vm, dev_pointers[i], irq);
      last_run_duration = vlib_time_now (vm) - last_periodic_time;
VLIB_REGISTER_NODE (avf_process_node) = {
  .function = avf_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "avf-process",
avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
  uword pd = vlib_pci_get_private_data (vm, h);
  avf_device_t *ad = avf_get_device (pd);
  icr0 = avf_reg_read (ad, AVFINT_ICR0);
  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] irq 0: icr0 0x%x",
	.format_args = "i4i4",
      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
      ed->dev_instance = ad->dev_instance;
  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);
  /* bit 30 - Send/Receive Admin queue interrupt indication */
  if (icr0 & (1 << 30))
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_AQ_INT, 0);
avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
  vnet_main_t *vnm = vnet_get_main ();
  uword pd = vlib_pci_get_private_data (vm, h);
  avf_device_t *ad = avf_get_device (pd);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, line - 1);
  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] irq %d: received",
	.format_args = "i4i2",
      ed = ELOG_DATA (&vlib_global_main.elog_main, el);
      ed->dev_instance = ad->dev_instance;
  if (ad->flags & AVF_DEVICE_F_RX_INT && rxq->int_mode)
    vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
  avf_irq_n_set_state (ad, line, AVF_IRQ_STATE_ENABLED);
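
/* Tear down a device: bring the interface down, free descriptor rings,
 * mailbox and queue resources, and return the device pool slot. Callers
 * pass with_barrier when worker threads are running. */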
avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier)
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
  if (ad->hw_if_index)
	vlib_worker_thread_barrier_sync (vm);
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      ethernet_delete_interface (vnm, ad->hw_if_index);
	vlib_worker_thread_barrier_release (vm);
  vlib_pci_device_close (vm, ad->pci_dev_handle);
  vlib_physmem_free (vm, ad->atq);
  vlib_physmem_free (vm, ad->arq);
  vlib_physmem_free (vm, ad->atq_bufs);
  vlib_physmem_free (vm, ad->arq_bufs);
  vec_foreach_index (i, ad->rxqs)
      avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
      vlib_physmem_free (vm, (void *) rxq->descs);
      if (rxq->n_enqueued)
	vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
      vec_free (rxq->bufs);
  vec_free (ad->rxqs);
  vec_foreach_index (i, ad->txqs)
      avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
      vlib_physmem_free (vm, (void *) txq->descs);
      if (txq->n_enqueued)
	  u16 first = (txq->next - txq->n_enqueued) & (txq->size - 1);
	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
      /* Free the placeholder buffer */
      vlib_buffer_free (vm, txq->ph_bufs, vec_len (txq->ph_bufs));
      vec_free (txq->ph_bufs);
      vec_free (txq->bufs);
      clib_ring_free (txq->rs_slots);
      vec_free (txq->tmp_bufs);
      vec_free (txq->tmp_descs);
      clib_spinlock_free (&txq->lock);
  vec_free (ad->txqs);
  vec_free (ad->name);
  clib_error_free (ad->error);
  dev_instance = ad->dev_instance;
  clib_memset (ad, 0, sizeof (*ad));
  pool_put_index (am->devices, dev_instance);
avf_validate_queue_size (avf_create_if_args_t * args)
  clib_error_t *error = 0;
  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
  if ((args->rxq_size > AVF_QUEUE_SZ_MAX)
      || (args->txq_size > AVF_QUEUE_SZ_MAX))
      args->rv = VNET_API_ERROR_INVALID_VALUE;
	clib_error_return (error, "queue size must not be greater than %u",
  if ((args->rxq_size < AVF_QUEUE_SZ_MIN)
      || (args->txq_size < AVF_QUEUE_SZ_MIN))
      args->rv = VNET_API_ERROR_INVALID_VALUE;
	clib_error_return (error, "queue size must not be smaller than %u",
  if ((args->rxq_size & (args->rxq_size - 1)) ||
      (args->txq_size & (args->txq_size - 1)))
      args->rv = VNET_API_ERROR_INVALID_VALUE;
	clib_error_return (error, "queue size must be a power of two");
avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  avf_device_t *ad, **adp;
  vlib_pci_dev_handle_t h;
  clib_error_t *error = 0;
  /* check input args */
  if (avf_validate_queue_size (args) != 0)
  pool_foreach (adp, am->devices) {
    if ((*adp)->pci_addr.as_u32 == args->addr.as_u32)
	args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
	  clib_error_return (error, "%U: %s", format_vlib_pci_addr,
			     &args->addr, "pci address in use");
  pool_get (am->devices, adp);
  adp[0] = ad = clib_mem_alloc_aligned (sizeof (avf_device_t),
					CLIB_CACHE_LINE_BYTES);
  clib_memset (ad, 0, sizeof (avf_device_t));
  ad->dev_instance = adp - am->devices;
  ad->per_interface_next_index = ~0;
  ad->name = vec_dup (args->name);
  if (args->enable_elog)
    ad->flags |= AVF_DEVICE_F_ELOG;
  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
      pool_put (am->devices, adp);
      args->rv = VNET_API_ERROR_INVALID_INTERFACE;
	clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
  ad->pci_dev_handle = h;
  ad->pci_addr = args->addr;
  ad->numa_node = vlib_pci_get_numa_node (vm, h);
  vlib_pci_set_private_data (vm, h, ad->dev_instance);
  if ((error = vlib_pci_bus_master_enable (vm, h)))
  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
						CLIB_CACHE_LINE_BYTES,
      error = vlib_physmem_last_error (vm);
  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
						CLIB_CACHE_LINE_BYTES,
      error = vlib_physmem_last_error (vm);
  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
						     CLIB_CACHE_LINE_BYTES,
  if (ad->atq_bufs == 0)
      error = vlib_physmem_last_error (vm);
  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
						     CLIB_CACHE_LINE_BYTES,
  if (ad->arq_bufs == 0)
      error = vlib_physmem_last_error (vm);
  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
  if (vlib_pci_supports_virtual_addr_dma (vm, h))
    ad->flags |= AVF_DEVICE_F_VA_DMA;
  if ((error = avf_device_init (vm, am, ad, args)))
  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
					       &avf_irq_0_handler)))
  if ((error = vlib_pci_register_msix_handler (vm, h, 1, ad->n_rx_irqs,
					       &avf_irq_n_handler)))
  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, ad->n_rx_irqs + 1)))
  if ((error = vlib_pci_intr_enable (vm, h)))
  /* create interface */
  error = ethernet_register_interface (vnm, avf_device_class.index,
				       ad->dev_instance, ad->hwaddr,
				       &ad->hw_if_index, avf_flag_change);
  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promisc mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ad->hw_if_index);
  hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER |
	      VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM |
	      VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO;
  ethernet_set_flags (vnm, ad->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
  hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
  vnet_hw_if_set_input_node (vnm, ad->hw_if_index, avf_input_node.index);
  for (i = 0; i < ad->n_rx_queues; i++)
      qi = vnet_hw_if_register_rx_queue (vnm, ad->hw_if_index, i,
					 VNET_HW_IF_RXQ_THREAD_ANY);
      if (ad->flags & AVF_DEVICE_F_RX_INT)
	  fi = vlib_pci_get_msix_file_index (vm, ad->pci_dev_handle, i + 1);
	  vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
      ad->rxqs[i].queue_index = qi;
  for (i = 0; i < ad->n_tx_queues; i++)
      u32 qi = vnet_hw_if_register_tx_queue (vnm, ad->hw_if_index, i);
      vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
      ad->txqs[i].queue_index = qi;
  vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
  if (pool_elts (am->devices) == 1)
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_START, 0);
  avf_delete_if (vm, ad, /* with_barrier */ 0);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  args->error = clib_error_return (error, "pci-addr %U",
				   format_vlib_pci_addr, &args->addr);
  avf_log_err (ad, "error: %U", format_clib_error, args->error);
static clib_error_t *
avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  if (ad->flags & AVF_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      ad->flags |= AVF_DEVICE_F_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
static clib_error_t *
avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
			      vnet_hw_if_rx_mode mode)
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hw->dev_instance);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  if (mode == VNET_HW_IF_RX_MODE_POLLING)
      if (rxq->int_mode == 0)
      if (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
	avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_WB_ON_ITR);
	avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_ENABLED);
      if (rxq->int_mode == 1)
      if (ad->n_rx_irqs != ad->n_rx_queues)
	return clib_error_return (0, "not enough interrupt lines");
      avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_ENABLED);
avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hw->dev_instance);
  /* Shut off redirection */
  if (node_index == ~0)
      ad->per_interface_next_index = node_index;
  ad->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
static clib_error_t *
avf_add_del_mac_address (vnet_hw_interface_t * hw,
			 const u8 * address, u8 is_add)
  vlib_main_t *vm = vlib_get_main ();
  avf_process_req_t req;
  req.dev_instance = hw->dev_instance;
  req.type = AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR;
  req.is_add = is_add;
  clib_memcpy (req.eth_addr, address, 6);
  return avf_process_request (vm, &req);
static char *avf_tx_func_error_strings[] = {
  foreach_avf_tx_func_error
avf_clear_hw_interface_counters (u32 instance)
  avf_device_t *ad = avf_get_device (instance);
  clib_memcpy_fast (&ad->last_cleared_eth_stats,
		    &ad->eth_stats, sizeof (ad->eth_stats));
avf_program_flow (u32 dev_instance, int is_add, u8 *rule, u32 rule_len,
		  u8 *program_status, u32 status_len)
  vlib_main_t *vm = vlib_get_main ();
  avf_process_req_t req;
  req.dev_instance = dev_instance;
  req.type = AVF_PROCESS_REQ_PROGRAM_FLOW;
  req.is_add = is_add;
  req.rule_len = rule_len;
  req.program_status = program_status;
  req.status_len = status_len;
  return avf_process_request (vm, &req);
VNET_DEVICE_CLASS (avf_device_class, ) = {
  .name = "Adaptive Virtual Function (AVF) interface",
  .clear_counters = avf_clear_hw_interface_counters,
  .format_device = format_avf_device,
  .format_device_name = format_avf_device_name,
  .admin_up_down_function = avf_interface_admin_up_down,
  .rx_mode_change_function = avf_interface_rx_mode_change,
  .rx_redirect_to_node = avf_set_interface_next_node,
  .mac_addr_add_del_function = avf_add_del_mac_address,
  .tx_function_n_errors = AVF_TX_N_ERROR,
  .tx_function_error_strings = avf_tx_func_error_strings,
  .flow_ops_function = avf_flow_ops_fn,
avf_init (vlib_main_t * vm)
  avf_main_t *am = &avf_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
VLIB_INIT_FUNCTION (avf_init) =
  .runs_after = VLIB_INITS ("pci_bus_init"),
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")