/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
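/* Admin ("mailbox") queue geometry: the VF talks to the PF through a pair
 * of 64-entry descriptor rings with 512-byte message buffers.  AVF_ITR_INT
 * is the default interrupt throttling interval; the DYN_CTL registers
 * program it in 2us units (hence the "/ 2" when it is written below), so a
 * value of 250 corresponds to roughly 250us between interrupts. */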
#define AVF_MBOX_LEN 64
#define AVF_MBOX_BUF_SZ 512
#define AVF_RXQ_SZ 512
#define AVF_TXQ_SZ 512
#define AVF_ITR_INT 250

#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_AVF 0x1889
#define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
#define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
void avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier);
static pci_device_id_t avf_pci_device_ids[] = {
  {.vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_AVF},
  {.vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_X710_VF},
  {.vendor_id = PCI_VENDOR_ID_INTEL, .device_id = PCI_DEVICE_ID_INTEL_X722_VF},
  {0},
};
const static char *virtchnl_event_names[] = {
#define _(v, n) [v] = #n,
  foreach_virtchnl_event_code
#undef _
};
typedef enum
{
  AVF_IRQ_STATE_DISABLED,
  AVF_IRQ_STATE_ENABLED,
  AVF_IRQ_STATE_WB_ON_ITR,
} avf_irq_state_t;
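/* Interrupt state applied to a vector via the DYN_CTL registers below:
 * DISABLED masks the vector, ENABLED delivers MSI-X interrupts, and
 * WB_ON_ITR keeps the vector masked but asks the device to write back
 * completed descriptors on the ITR timer, which suits polling mode. */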
static inline void
avf_irq_0_set_state (avf_device_t * ad, avf_irq_state_t state)
{
  u32 dyn_ctl0 = 0, icr0_ena = 0;

  dyn_ctl0 |= (3 << 3);		/* [4:3] 11b = No ITR update */

  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);

  if (state == AVF_IRQ_STATE_DISABLED)
    return;

  dyn_ctl0 = 0;
  icr0_ena = 0;

  icr0_ena |= (1 << 30);	/* [30] Admin Queue Enable */

  dyn_ctl0 |= (1 << 0);		/* [0] Interrupt Enable */
  dyn_ctl0 |= (1 << 1);		/* [1] Clear PBA */
  dyn_ctl0 |= (2 << 3);		/* [4:3] ITR Index, 10b = ITR2 */
  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */

  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
}
static inline void
avf_irq_n_set_state (avf_device_t * ad, u8 line, avf_irq_state_t state)
{
  u32 dyn_ctln = 0;

  /* disable the vector first */
  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);

  if (state == AVF_IRQ_STATE_DISABLED)
    return;

  dyn_ctln |= (1 << 1);		/* [1] Clear PBA */
  if (state == AVF_IRQ_STATE_WB_ON_ITR)
    {
      /* minimal ITR interval, use ITR1 */
      dyn_ctln |= (1 << 3);	/* [4:3] ITR Index */
      dyn_ctln |= ((32 / 2) << 5);	/* [16:5] ITR Interval in 2us steps */
      dyn_ctln |= (1 << 30);	/* [30] Writeback on ITR */
    }
  else
    {
      /* configured ITR interval, use ITR0 */
      dyn_ctln |= (1 << 0);	/* [0] Interrupt Enable */
      dyn_ctln |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */
    }

  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
}
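/* Enqueue a descriptor (plus optional inline message buffer) on the admin
 * transmit queue, bump the ATQT tail register, then poll the DD/CMP
 * completion flags with exponential backoff until the PF completes the
 * descriptor or the wait times out. */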
static clib_error_t *
avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
		 void *data, int len)
{
  clib_error_t *err = 0;
  avf_aq_desc_t *d, dc;
  f64 t0, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;

  d = &ad->atq[ad->atq_next_slot];
  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
  if (len)
    {
      u64 pa;
      d->datalen = len;
      pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
      d->addr_hi = (u32) (pa >> 32);
      d->addr_lo = (u32) pa;
      clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
			data, len);
      d->flags |= AVF_AQ_F_BUF;
    }

  /* keep a copy of the request descriptor before the device overwrites it */
  if (ad->flags & AVF_DEVICE_F_ELOG)
    clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));

  CLIB_MEMORY_BARRIER ();
  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);

  t0 = vlib_time_now (vm);
retry:
  vlib_process_suspend (vm, suspend_time);

  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
    {
      f64 t = vlib_time_now (vm) - t0;
      if (t > AVF_AQ_ENQ_MAX_WAIT_TIME)
	{
	  avf_log_err (ad, "aq_desc_enq failed (timeout %.3fs)", t);
	  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
				   d->opcode);
	  goto done;
	}
      suspend_time *= 2;
      goto retry;
    }

  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
  if (d->flags & AVF_AQ_F_ERR)
    return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
			      "%d]", d->opcode, d->retval);

done:
  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
	    "datalen %d retval %d",
	  .format_args = "i4i2i2i2i2i2",
	};
      struct
	{
	  u32 dev_instance;
	  u16 s_flags;
	  u16 r_flags;
	  u16 opcode;
	  u16 datalen;
	  u16 retval;
	} *ed;
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->s_flags = dc.flags;
      ed->r_flags = d->flags;
      ed->opcode = dc.opcode;
      ed->datalen = dc.datalen;
      ed->retval = d->retval;
    }

  return err;
}
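/* Certain RX control registers cannot be written directly by the VF;
 * admin queue opcode 0x207 asks the PF to perform the register write on
 * the VF's behalf. */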
clib_error_t *
avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
			  u32 val)
{
  clib_error_t *err;
  avf_aq_desc_t d = {.opcode = 0x207, .param1 = reg, .param3 = val };

  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
  if (err)
    return err;

  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
	  .format_args = "i4i4i4",
	};
      struct
	{
	  u32 dev_instance;
	  u32 reg;
	  u32 val;
	} *ed;
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->reg = reg;
      ed->val = val;
    }
  return 0;
}
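/* Allocate and DMA-map one RX descriptor ring, pre-fill all but a small
 * reserve of 8 slots with buffers from the NUMA-local buffer pool, and
 * record the QRX_TAIL doorbell address for the queue. */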
clib_error_t *
avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
{
  clib_error_t *err;
  avf_rxq_t *rxq;
  u32 n_alloc, i;

  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (ad->rxqs, qid);
  rxq->size = rxq_size;
  rxq->next = 0;
  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
						   sizeof (avf_rx_desc_t),
						   2 * CLIB_CACHE_LINE_BYTES,
						   ad->numa_node);

  rxq->buffer_pool_index =
    vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);

  if (rxq->descs == 0)
    return vlib_physmem_last_error (vm);

  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
    return err;

  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);

  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
					 rxq->buffer_pool_index);

  if (n_alloc == 0)
    return clib_error_return (0, "buffer allocation error");

  rxq->n_enqueued = n_alloc;
  avf_rx_desc_t *d = rxq->descs;
  for (i = 0; i < n_alloc; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
      if (ad->flags & AVF_DEVICE_F_VA_DMA)
	d->qword[0] = vlib_buffer_get_va (b);
      else
	d->qword[0] = vlib_buffer_get_pa (vm, b);
      d++;
    }

  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
  return 0;
}
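/* Allocate one TX ring per worker thread; when there are more threads than
 * queue pairs, the extra threads share an existing queue under a spinlock. */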
clib_error_t *
avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
{
  clib_error_t *err;
  avf_txq_t *txq;

  if (qid >= ad->num_queue_pairs)
    {
      qid = qid % ad->num_queue_pairs;
      txq = vec_elt_at_index (ad->txqs, qid);
      clib_spinlock_init (&txq->lock);
      ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
      return 0;
    }

  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (ad->txqs, qid);
  txq->size = txq_size;
  txq->next = 0;
  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
						   sizeof (avf_tx_desc_t),
						   2 * CLIB_CACHE_LINE_BYTES,
						   ad->numa_node);
  if (txq->descs == 0)
    return vlib_physmem_last_error (vm);

  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
    return err;

  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);

  /* initialize ring of pending RS slots */
  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);

  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
  return 0;
}
typedef struct
{
  u16 vsi_id;
  u16 flags;
} virtchnl_promisc_info_t;
static void
avf_arq_slot_init (avf_device_t * ad, u16 slot)
{
  avf_aq_desc_t *d;
  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
  d = &ad->arq[slot];
  clib_memset (d, 0, sizeof (avf_aq_desc_t));
  d->flags = AVF_AQ_F_BUF;
  d->datalen = AVF_MBOX_BUF_SZ;
  d->addr_hi = (u32) (pa >> 32);
  d->addr_lo = (u32) pa;
}
static inline u64
avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
{
  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
    pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
}
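/* Program the mailbox base/length registers in both directions and hand
 * every receive slot to the device (ARQ tail = ring length - 1), so the PF
 * always has descriptors to deliver messages into. */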
static void
avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
{
  u64 pa;
  int i;

  /* VF MailBox Transmit */
  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);

  pa = avf_dma_addr (vm, ad, ad->atq);
  avf_reg_write (ad, AVF_ATQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ATQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ATQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32));	/* Base Address High */

  /* VF MailBox Receive */
  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);

  for (i = 0; i < AVF_MBOX_LEN; i++)
    avf_arq_slot_init (ad, i);

  pa = avf_dma_addr (vm, ad, ad->arq);

  avf_reg_write (ad, AVF_ARQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ARQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ARQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32));	/* Base Address High */
  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1);	/* Tail */

  ad->atq_next_slot = 0;
  ad->arq_next_slot = 0;
}
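/* Synchronous virtchnl request/response: send the message with admin queue
 * opcode 0x801 (send-to-PF), then poll the receive queue head until the
 * matching reply arrives.  Unsolicited VIRTCHNL_OP_EVENT messages that show
 * up in the meantime are queued on ad->events for the process node. */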
clib_error_t *
avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
		void *in, int in_len, void *out, int out_len)
{
  clib_error_t *err = 0;
  avf_aq_desc_t *d, dt = {.opcode = 0x801, .v_opcode = op };
  u32 head;
  f64 t0, suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;

  /* adminq operations should be only done from process node after device
   * is initialized */
  ASSERT ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0 ||
	  vlib_get_current_process_node_index (vm) == avf_process_node.index);

  /* suppress interrupt in the next adminq receive slot
     as we are going to wait for response
     we only need interrupts when event is received */
  d = &ad->arq[ad->arq_next_slot];
  d->flags |= AVF_AQ_F_SI;

  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
    return err;

  t0 = vlib_time_now (vm);
retry:
  head = avf_get_u32 (ad->bar0, AVF_ARQH);

  if (ad->arq_next_slot == head)
    {
      f64 t = vlib_time_now (vm) - t0;
      if (t > AVF_SEND_TO_PF_MAX_WAIT_TIME)
	{
	  avf_log_err (ad, "send_to_pf failed (timeout %.3fs)", t);
	  return clib_error_return (0, "timeout");
	}
      vlib_process_suspend (vm, suspend_time);
      suspend_time *= 2;
      goto retry;
    }

  d = &ad->arq[ad->arq_next_slot];

  if (d->v_opcode == VIRTCHNL_OP_EVENT)
    {
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      virtchnl_pf_event_t *e;

      if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
	  ((d->flags & AVF_AQ_F_BUF) == 0))
	return clib_error_return (0, "event message error");

      vec_add2 (ad->events, e, 1);
      clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
      avf_arq_slot_init (ad, ad->arq_next_slot);
      ad->arq_next_slot++;
      /* reset timer */
      t0 = vlib_time_now (vm);
      suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
      goto retry;
    }

  if (d->v_opcode != op)
    {
      err =
	clib_error_return (0,
			   "unexpected message received [v_opcode = %u, "
			   "expected %u, v_retval %d]", d->v_opcode, op,
			   d->v_retval);
      goto done;
    }

  if (d->v_retval)
    {
      err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
			       d->v_opcode, d->v_retval);
      goto done;
    }

  if (out_len && (d->flags & AVF_AQ_F_BUF))
    {
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      clib_memcpy_fast (out, buf, out_len);
    }

  /* old arq descriptor is not needed anymore */
  avf_arq_slot_init (ad, ad->arq_next_slot);
  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);

  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;

done:
  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
	  .format_args = "i4t4i4i4",
	  .n_enum_strings = VIRTCHNL_N_OPS,
	  .enum_strings = {
#define _(v, n) [v] = #n,
	      foreach_virtchnl_op
#undef _
	  },
	};
      struct
	{
	  u32 dev_instance;
	  u32 v_opcode;
	  u32 v_opcode_val;
	  u32 v_retval;
	} *ed;
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->v_opcode = op;
      ed->v_opcode_val = op;
      ed->v_retval = d->v_retval;
    }
  return err;
}
clib_error_t *
avf_op_version (vlib_main_t * vm, avf_device_t * ad,
		virtchnl_version_info_t * ver)
{
  clib_error_t *err = 0;
  virtchnl_version_info_t myver = {
    .major = VIRTCHNL_VERSION_MAJOR,
    .minor = VIRTCHNL_VERSION_MINOR,
  };

  avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);

  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
			sizeof (virtchnl_version_info_t), ver,
			sizeof (virtchnl_version_info_t));
  return err;
}
clib_error_t *
avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
			 virtchnl_vf_resource_t * res)
{
  clib_error_t *err = 0;
  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
		VIRTCHNL_VF_OFFLOAD_RX_POLLING |
		VIRTCHNL_VF_CAP_ADV_LINK_SPEED);
  int i;

  avf_log_debug (ad, "get_vf_resources: bitmap 0x%x", bitmap);
  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
			sizeof (u32), res, sizeof (virtchnl_vf_resource_t));

  if (err)
    return err;

  avf_log_debug (ad, "get_vf_resources: num_vsis %u num_queue_pairs %u "
		 "max_vectors %u max_mtu %u vf_offload_flags 0x%04x "
		 "rss_key_size %u rss_lut_size %u",
		 res->num_vsis, res->num_queue_pairs, res->max_vectors,
		 res->max_mtu, res->vf_offload_flags, res->rss_key_size,
		 res->rss_lut_size);
  for (i = 0; i < res->num_vsis; i++)
    avf_log_debug (ad, "get_vf_resources_vsi[%u]: vsi_id %u "
		   "num_queue_pairs %u vsi_type %u qset_handle %u "
		   "default_mac_addr %U", i,
		   res->vsi_res[i].vsi_id,
		   res->vsi_res[i].num_queue_pairs,
		   res->vsi_res[i].vsi_type,
		   res->vsi_res[i].qset_handle,
		   format_ethernet_address,
		   res->vsi_res[i].default_mac_addr);

  return err;
}
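/* Spread the RSS indirection table round-robin across the configured RX
 * queues so hashed flows land evenly on all of them. */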
clib_error_t *
avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
{
  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
  int i;
  u8 msg[msg_len];
  virtchnl_rss_lut_t *rl;

  clib_memset (msg, 0, msg_len);
  rl = (virtchnl_rss_lut_t *) msg;
  rl->vsi_id = ad->vsi_id;
  rl->lut_entries = ad->rss_lut_size;
  for (i = 0; i < ad->rss_lut_size; i++)
    rl->lut[i] = i % ad->n_rx_queues;

  avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
		 rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
		 rl->lut, rl->lut_entries);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
			 0);
}
clib_error_t *
avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
{
  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
  int i;
  u8 msg[msg_len];
  virtchnl_rss_key_t *rk;

  clib_memset (msg, 0, msg_len);
  rk = (virtchnl_rss_key_t *) msg;
  rk->vsi_id = ad->vsi_id;
  rk->key_len = ad->rss_key_size;
  u32 seed = random_default_seed ();
  for (i = 0; i < ad->rss_key_size; i++)
    rk->key[i] = (u8) random_u32 (&seed);

  avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
		 rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
		 rk->key_len);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
			 0);
}
clib_error_t *
avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
{
  avf_log_debug (ad, "disable_vlan_stripping");
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
			 0);
}
clib_error_t *
avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad, int is_enable)
{
  virtchnl_promisc_info_t pi = { 0 };

  pi.vsi_id = ad->vsi_id;

  if (is_enable)
    pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;

  avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
		 pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
		 pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
			 sizeof (virtchnl_promisc_info_t), 0, 0);
}
clib_error_t *
avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
{
  int i;
  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
    sizeof (virtchnl_queue_pair_info_t);
  u8 msg[msg_len];
  virtchnl_vsi_queue_config_info_t *ci;

  clib_memset (msg, 0, msg_len);
  ci = (virtchnl_vsi_queue_config_info_t *) msg;
  ci->vsi_id = ad->vsi_id;
  ci->num_queue_pairs = n_qp;

  avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
		 ad->vsi_id, ci->num_queue_pairs);

  for (i = 0; i < n_qp; i++)
    {
      virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
      virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;

      rxq->vsi_id = ad->vsi_id;
      rxq->queue_id = i;
      rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
      if (i < vec_len (ad->rxqs))
	{
	  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
	  rxq->ring_len = q->size;
	  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
	  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
	  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
	}
      avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
		     "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
		     i, rxq->max_pkt_size, rxq->ring_len,
		     rxq->databuffer_size, rxq->dma_ring_addr);

      txq->vsi_id = ad->vsi_id;
      txq->queue_id = i;
      if (i < vec_len (ad->txqs))
	{
	  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
	  txq->ring_len = q->size;
	  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
	}
      avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
		     "dma_ring_addr 0x%llx", i, txq->ring_len,
		     txq->dma_ring_addr);
    }

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
			 0, 0);
}
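/* Map RX queues to MSI-X vectors.  Vector 0 is reserved for mailbox/misc
 * interrupts, so RX vectors start at 1.  With one vector per queue, each
 * queue gets its own vector; otherwise every vector covers the whole set
 * of RX queues. */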
clib_error_t *
avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
{
  int msg_len = sizeof (virtchnl_irq_map_info_t) +
    (ad->n_rx_irqs) * sizeof (virtchnl_vector_map_t);
  u8 msg[msg_len];
  virtchnl_irq_map_info_t *imi;

  clib_memset (msg, 0, msg_len);
  imi = (virtchnl_irq_map_info_t *) msg;
  imi->num_vectors = ad->n_rx_irqs;

  for (int i = 0; i < ad->n_rx_irqs; i++)
    {
      imi->vecmap[i].vector_id = i + 1;
      imi->vecmap[i].vsi_id = ad->vsi_id;
      if (ad->n_rx_irqs == ad->n_rx_queues)
	imi->vecmap[i].rxq_map = 1 << i;
      else
	imi->vecmap[i].rxq_map = pow2_mask (ad->n_rx_queues);

      avf_log_debug (ad, "config_irq_map[%u/%u]: vsi_id %u vector_id %u "
		     "rxq_map %u", i, ad->n_rx_irqs - 1, ad->vsi_id,
		     imi->vecmap[i].vector_id, imi->vecmap[i].rxq_map);
    }

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
			 0);
}
clib_error_t *
avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
{
  int msg_len =
    sizeof (virtchnl_ether_addr_list_t) +
    count * sizeof (virtchnl_ether_addr_t);
  u8 msg[msg_len];
  virtchnl_ether_addr_list_t *al;
  int i;

  clib_memset (msg, 0, msg_len);
  al = (virtchnl_ether_addr_list_t *) msg;
  al->vsi_id = ad->vsi_id;
  al->num_elements = count;

  avf_log_debug (ad, "add_eth_addr: vsi_id %u num_elements %u",
		 ad->vsi_id, al->num_elements);

  for (i = 0; i < count; i++)
    {
      clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
      avf_log_debug (ad, "add_eth_addr[%u]: %U", i,
		     format_ethernet_address, &al->list[i].addr);
    }
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
			 0);
}
clib_error_t *
avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
{
  virtchnl_queue_select_t qs = { 0 };
  int i;

  qs.vsi_id = ad->vsi_id;
  qs.rx_queues = rx;
  qs.tx_queues = tx;

  avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
		 ad->vsi_id, qs.rx_queues, qs.tx_queues);

  /* ring the RX tail doorbells before the queues go live */
  for (i = 0; i < ad->n_rx_queues; i++)
    if (rx & (1 << i))
      {
	avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
	avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
      }

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
			 sizeof (virtchnl_queue_select_t), 0, 0);
}
clib_error_t *
avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
		  virtchnl_eth_stats_t * es)
{
  virtchnl_queue_select_t qs = { 0 };
  qs.vsi_id = ad->vsi_id;

  avf_log_debug (ad, "get_stats: vsi_id %u", ad->vsi_id);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
			 &qs, sizeof (virtchnl_queue_select_t),
			 es, sizeof (virtchnl_eth_stats_t));
}
clib_error_t *
avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
{
  avf_aq_desc_t d = { 0 };
  clib_error_t *error;
  u32 rstat;
  f64 t0, t = 0, suspend_time = AVF_RESET_SUSPEND_TIME;

  avf_log_debug (ad, "reset");

  d.opcode = 0x801;
  d.v_opcode = VIRTCHNL_OP_RESET_VF;
  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
    return error;

  t0 = vlib_time_now (vm);
retry:
  vlib_process_suspend (vm, suspend_time);

  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);

  if (rstat == 2 || rstat == 3)
    {
      avf_log_debug (ad, "reset completed in %.3fs", t);
      return 0;
    }

  t = vlib_time_now (vm) - t0;
  if (t > AVF_RESET_MAX_WAIT_TIME)
    {
      avf_log_err (ad, "reset failed (timeout %.3fs)", t);
      return clib_error_return (0, "reset failed (timeout)");
    }

  suspend_time *= 2;
  goto retry;
}
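/* VIRTCHNL_OP_REQUEST_QUEUES asks the PF for more queue pairs.  A granted
 * request is signalled by the PF resetting the VF rather than replying, so
 * success here is detected as a send timeout followed by RSTAT reaching
 * the COMPLETED/VFACTIVE state. */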
clib_error_t *
avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
{
  virtchnl_vf_res_request_t res_req = { 0 };
  clib_error_t *error;
  u32 rstat;
  f64 t0, t, suspend_time = AVF_RESET_SUSPEND_TIME;

  res_req.num_queue_pairs = num_queue_pairs;

  avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);

  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
			  sizeof (virtchnl_vf_res_request_t), &res_req,
			  sizeof (virtchnl_vf_res_request_t));
  /*
   * if PF responds, the request failed
   * else PF initializes restart and avf_send_to_pf returns an error
   */
  if (!error)
    return clib_error_return (0, "requested more than %u queue pairs",
			      res_req.num_queue_pairs);

  t0 = vlib_time_now (vm);
retry:
  vlib_process_suspend (vm, suspend_time);
  t = vlib_time_now (vm) - t0;

  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);

  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
    goto done;

  if (t > AVF_RESET_MAX_WAIT_TIME)
    {
      avf_log_err (ad, "request queues failed (timeout %.3f seconds)", t);
      return clib_error_return (0, "request queues failed (timeout)");
    }

  suspend_time *= 2;
  goto retry;

done:
  return NULL;
}
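/* Device bring-up sequence: negotiate the virtchnl version, fetch VF
 * resources, disable VLAN stripping, size and configure the queues, map
 * interrupts, program the MAC address and finally enable the queues. */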
static clib_error_t *
avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
		 avf_create_if_args_t * args)
{
  virtchnl_version_info_t ver = { 0 };
  virtchnl_vf_resource_t res = { 0 };
  clib_error_t *error;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  int i, wb_on_itr;

  avf_adminq_init (vm, ad);

  if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
						     args->rxq_num))))
    {
      /* we failed to get more queues, but still we want to proceed */
      clib_error_free (error);
    }

  if ((error = avf_device_reset (vm, ad)))
    return error;

  avf_adminq_init (vm, ad);

  /*
   * OP_VERSION
   */
  if ((error = avf_op_version (vm, ad, &ver)))
    return error;

  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
      ver.minor != VIRTCHNL_VERSION_MINOR)
    return clib_error_return (0, "incompatible protocol version "
			      "(remote %d.%d)", ver.major, ver.minor);

  /*
   * OP_GET_VF_RESOURCES
   */
  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
    return error;

  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
    return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");

  ad->vsi_id = res.vsi_res[0].vsi_id;
  ad->feature_bitmap = res.vf_offload_flags;
  ad->num_queue_pairs = res.num_queue_pairs;
  ad->max_vectors = res.max_vectors;
  ad->max_mtu = res.max_mtu;
  ad->rss_key_size = res.rss_key_size;
  ad->rss_lut_size = res.rss_lut_size;
  wb_on_itr = (ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) != 0;

  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);

  /*
   * Disable VLAN stripping
   */
  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
    return error;

  /*
   * Init Queues
   */
  if (args->rxq_num == 0)
    {
      args->rxq_num = 1;
    }
  else if (args->rxq_num > ad->num_queue_pairs)
    {
      args->rxq_num = ad->num_queue_pairs;
      avf_log_warn (ad, "Requested more rx queues than queue pairs "
		    "available. Using %u rx queues.", args->rxq_num);
    }

  for (i = 0; i < args->rxq_num; i++)
    if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
      return error;

  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
      return error;

  if (ad->max_vectors > ad->n_rx_queues)
    {
      ad->flags |= AVF_DEVICE_F_RX_INT;
      ad->n_rx_irqs = args->rxq_num;
    }
  else
    ad->n_rx_irqs = 1;

  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_lut (vm, ad)))
    return error;

  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_key (vm, ad)))
    return error;

  if ((error = avf_op_config_vsi_queues (vm, ad)))
    return error;

  if ((error = avf_op_config_irq_map (vm, ad)))
    return error;

  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);

  for (i = 0; i < ad->n_rx_irqs; i++)
    avf_irq_n_set_state (ad, i, wb_on_itr ? AVF_IRQ_STATE_WB_ON_ITR :
			 AVF_IRQ_STATE_ENABLED);

  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
    return error;

  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
				     pow2_mask (ad->n_tx_queues))))
    return error;

  ad->flags |= AVF_DEVICE_F_INITIALIZED;
  return error;
}
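/* Periodic/interrupt-driven housekeeping for one device: verify the device
 * is active and the mailbox rings are still enabled, refresh counters on
 * the periodic pass, and drain queued PF events (link changes in
 * particular) into vnet interface state. */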
static void
avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  virtchnl_pf_event_t *e;
  u32 r;

  if (ad->flags & AVF_DEVICE_F_ERROR)
    return;

  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
    return;

  ASSERT (ad->error == 0);

  /* do not process device in reset state */
  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
  if (r != VIRTCHNL_VFR_VFACTIVE)
    return;

  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
    {
      ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
      avf_log_err (ad, "error: %U", format_clib_error, ad->error);
      goto error;
    }

  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
    {
      ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
      avf_log_err (ad, "error: %U", format_clib_error, ad->error);
      goto error;
    }

  if (is_irq == 0)
    avf_op_get_stats (vm, ad, &ad->eth_stats);

  vec_foreach (e, ad->events)
    {
      avf_log_debug (ad, "event: %s (%u) sev %d",
		     virtchnl_event_names[e->event], e->event, e->severity);
      if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
	{
	  int link_up;
	  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
	  u32 flags = 0;
	  u32 mbps = 0;

	  if (ad->feature_bitmap & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
	    link_up = e->event_data.link_event_adv.link_status;
	  else
	    link_up = e->event_data.link_event.link_status;

	  if (ad->feature_bitmap & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
	    mbps = e->event_data.link_event_adv.link_speed;
	  else if (speed == VIRTCHNL_LINK_SPEED_40GB)
	    mbps = 40000;
	  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
	    mbps = 25000;
	  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
	    mbps = 10000;
	  else if (speed == VIRTCHNL_LINK_SPEED_5GB)
	    mbps = 5000;
	  else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
	    mbps = 2500;
	  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
	    mbps = 1000;
	  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
	    mbps = 100;

	  avf_log_debug (ad, "event_link_change: status %d speed %u mbps",
			 link_up, mbps);

	  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
	    {
	      ad->flags |= AVF_DEVICE_F_LINK_UP;
	      flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
			VNET_HW_INTERFACE_FLAG_LINK_UP);
	      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
	      vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index,
						mbps * 1000);
	      ad->link_speed = mbps;
	    }
	  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
	    {
	      ad->flags &= ~AVF_DEVICE_F_LINK_UP;
	      ad->link_speed = 0;
	    }

	  if (ad->flags & AVF_DEVICE_F_ELOG)
	    {
	      ELOG_TYPE_DECLARE (el) =
		{
		  .format = "avf[%d] link change: link_status %d "
		    "link_speed %d mbps",
		  .format_args = "i4i1i4",
		};
	      struct
		{
		  u32 dev_instance;
		  u8 link_status;
		  u32 link_speed;
		} *ed;
	      ed = ELOG_DATA (&vm->elog_main, el);
	      ed->dev_instance = ad->dev_instance;
	      ed->link_status = link_up;
	      ed->link_speed = mbps;
	    }
	}
      else if (ad->flags & AVF_DEVICE_F_ELOG)
	{
	  ELOG_TYPE_DECLARE (el) =
	    {
	      .format = "avf[%d] unknown event: event %d severity %d",
	      .format_args = "i4i4i1i1",
	    };
	  struct
	    {
	      u32 dev_instance;
	      u32 event;
	      u32 severity;
	    } *ed;
	  ed = ELOG_DATA (&vm->elog_main, el);
	  ed->dev_instance = ad->dev_instance;
	  ed->event = e->event;
	  ed->severity = e->severity;
	}
    }

  vec_reset_length (ad->events);
  return;

error:
  ad->flags |= AVF_DEVICE_F_ERROR;
  ASSERT (ad->error != 0);
  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
}
static u32
avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  vlib_main_t *vm = vlib_get_main ();
  avf_device_t *ad = avf_get_device (hw->dev_instance);
  int promisc_enabled;

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      ad->flags &= ~AVF_DEVICE_F_PROMISC;
      break;
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      ad->flags |= AVF_DEVICE_F_PROMISC;
      break;
    default:
      return ~0;
    }

  promisc_enabled = ((ad->flags & AVF_DEVICE_F_PROMISC) != 0);
  vlib_process_signal_event (vm, avf_process_node.index,
			     promisc_enabled ?
			     AVF_PROCESS_EVENT_SET_PROMISC_ENABLE :
			     AVF_PROCESS_EVENT_SET_PROMISC_DISABLE,
			     ad->dev_instance);
  return 0;
}
static uword
avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  avf_main_t *am = &avf_main;
  uword *event_data = 0, event_type;
  int enabled = 0, irq;
  f64 last_run_duration = 0;
  f64 last_periodic_time = 0;
  avf_device_t **dev_pointers = 0;
  u32 i;

  while (1)
    {
      if (enabled)
	vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
      else
	vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);
      irq = 0;

      switch (event_type)
	{
	case ~0:
	  last_periodic_time = vlib_time_now (vm);
	  break;
	case AVF_PROCESS_EVENT_START:
	  enabled = 1;
	  break;
	case AVF_PROCESS_EVENT_DELETE_IF:
	  for (int i = 0; i < vec_len (event_data); i++)
	    {
	      avf_device_t *ad = avf_get_device (event_data[i]);
	      avf_delete_if (vm, ad, /* with_barrier */ 1);
	    }
	  if (pool_elts (am->devices) < 1)
	    enabled = 0;
	  break;
	case AVF_PROCESS_EVENT_AQ_INT:
	  irq = 1;
	  break;
	case AVF_PROCESS_EVENT_SET_PROMISC_ENABLE:
	case AVF_PROCESS_EVENT_SET_PROMISC_DISABLE:
	  for (int i = 0; i < vec_len (event_data); i++)
	    {
	      avf_device_t *ad = avf_get_device (event_data[i]);
	      clib_error_t *err;
	      int is_enable = 0;

	      if (event_type == AVF_PROCESS_EVENT_SET_PROMISC_ENABLE)
		is_enable = 1;

	      if ((err = avf_config_promisc_mode (vm, ad, is_enable)))
		{
		  avf_log_err (ad, "error: %U", format_clib_error, err);
		  clib_error_free (err);
		}
	    }
	  break;
	default:
	  ASSERT (0);
	}

      vec_reset_length (event_data);

      if (enabled == 0)
	continue;

      /* create local list of device pointers as device pool may grow
       * during suspend */
      vec_reset_length (dev_pointers);
      pool_foreach_index (i, am->devices,
        ({
	  vec_add1 (dev_pointers, avf_get_device (i));
	}));

      vec_foreach_index (i, dev_pointers)
        {
	  avf_process_one_device (vm, dev_pointers[i], irq);
	};

      last_run_duration = vlib_time_now (vm) - last_periodic_time;
    }
  return 0;
}
VLIB_REGISTER_NODE (avf_process_node) = {
  .function = avf_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "avf-process",
};
static void
avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
{
  uword pd = vlib_pci_get_private_data (vm, h);
  avf_device_t *ad = avf_get_device (pd);
  u32 icr0;

  icr0 = avf_reg_read (ad, AVFINT_ICR0);

  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] irq 0: icr0 0x%x",
	  .format_args = "i4i4",
	};
      struct
	{
	  u32 dev_instance;
	  u32 icr0;
	} *ed;
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->icr0 = icr0;
    }

  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);

  /* bit 30 - Send/Receive Admin queue interrupt indication */
  if (icr0 & (1 << 30))
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_AQ_INT, 0);
}
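/* MSI-X vectors 1..n service the RX queues, so vector N maps to queue N-1.
 * In interrupt mode, mark the matching queue pending so the input node
 * runs, then re-arm the vector. */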
static void
avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword pd = vlib_pci_get_private_data (vm, h);
  avf_device_t *ad = avf_get_device (pd);

  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] irq %d: received",
	  .format_args = "i4i2",
	};
      struct
	{
	  u32 dev_instance;
	  u16 line;
	} *ed;
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->line = line;
    }

  line--;

  if (ad->flags & AVF_DEVICE_F_RX_INT && ad->rxqs[line].int_mode)
    vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, line);
  avf_irq_n_set_state (ad, line, AVF_IRQ_STATE_ENABLED);
}
void
avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier)
{
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  int i;

  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;

  if (ad->hw_if_index)
    {
      if (with_barrier)
	vlib_worker_thread_barrier_sync (vm);
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
      ethernet_delete_interface (vnm, ad->hw_if_index);
      if (with_barrier)
	vlib_worker_thread_barrier_release (vm);
    }

  vlib_pci_device_close (vm, ad->pci_dev_handle);

  vlib_physmem_free (vm, ad->atq);
  vlib_physmem_free (vm, ad->arq);
  vlib_physmem_free (vm, ad->atq_bufs);
  vlib_physmem_free (vm, ad->arq_bufs);

  vec_foreach_index (i, ad->rxqs)
    {
      avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
      vlib_physmem_free (vm, (void *) rxq->descs);
      if (rxq->n_enqueued)
	vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
				    rxq->n_enqueued);
      vec_free (rxq->bufs);
    }
  vec_free (ad->rxqs);

  vec_foreach_index (i, ad->txqs)
    {
      avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
      vlib_physmem_free (vm, (void *) txq->descs);
      if (txq->n_enqueued)
	{
	  u16 first = (txq->next - txq->n_enqueued) & (txq->size - 1);
	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
				      txq->n_enqueued);
	}
      vec_free (txq->bufs);
      clib_ring_free (txq->rs_slots);
    }
  vec_free (ad->txqs);
  vec_free (ad->name);

  clib_error_free (ad->error);
  /* return the pool slot before the device state (and its index) is wiped */
  pool_put_index (am->devices, ad->dev_instance);
  clib_memset (ad, 0, sizeof (*ad));
  clib_mem_free (ad);
}
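/* Create flow: validate ring sizes, open and map the PCI device, allocate
 * DMA-able mailbox memory, initialize the device over virtchnl, hook up
 * the MSI-X handlers and finally register the ethernet interface. */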
void
avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  avf_device_t *ad, **adp;
  vlib_pci_dev_handle_t h;
  clib_error_t *error = 0;
  int i;

  /* check input args */
  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;

  if ((args->rxq_size & (args->rxq_size - 1))
      || (args->txq_size & (args->txq_size - 1)))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error =
	clib_error_return (error, "queue size must be a power of two");
      return;
    }

  pool_get (am->devices, adp);
  adp[0] = ad = clib_mem_alloc_aligned (sizeof (avf_device_t),
					CLIB_CACHE_LINE_BYTES);
  clib_memset (ad, 0, sizeof (avf_device_t));
  ad->dev_instance = adp - am->devices;
  ad->per_interface_next_index = ~0;
  ad->name = vec_dup (args->name);

  if (args->enable_elog)
    ad->flags |= AVF_DEVICE_F_ELOG;

  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
				     &h)))
    {
      pool_put (am->devices, adp);
      args->rv = VNET_API_ERROR_INVALID_INTERFACE;
      args->error =
	clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
			   &args->addr);
      return;
    }
  ad->pci_dev_handle = h;
  ad->pci_addr = args->addr;
  ad->numa_node = vlib_pci_get_numa_node (vm, h);

  vlib_pci_set_private_data (vm, h, ad->dev_instance);

  if ((error = vlib_pci_bus_master_enable (vm, h)))
    goto error;

  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
    goto error;

  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
						AVF_MBOX_LEN,
						CLIB_CACHE_LINE_BYTES,
						ad->numa_node);
  if (ad->atq == 0)
    {
      error = vlib_physmem_last_error (vm);
      goto error;
    }

  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
    goto error;

  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
						AVF_MBOX_LEN,
						CLIB_CACHE_LINE_BYTES,
						ad->numa_node);
  if (ad->arq == 0)
    {
      error = vlib_physmem_last_error (vm);
      goto error;
    }

  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
    goto error;

  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
						     AVF_MBOX_LEN,
						     CLIB_CACHE_LINE_BYTES,
						     ad->numa_node);
  if (ad->atq_bufs == 0)
    {
      error = vlib_physmem_last_error (vm);
      goto error;
    }

  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
    goto error;

  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
						     AVF_MBOX_LEN,
						     CLIB_CACHE_LINE_BYTES,
						     ad->numa_node);
  if (ad->arq_bufs == 0)
    {
      error = vlib_physmem_last_error (vm);
      goto error;
    }

  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
    goto error;

  if (vlib_pci_supports_virtual_addr_dma (vm, h))
    ad->flags |= AVF_DEVICE_F_VA_DMA;

  if ((error = avf_device_init (vm, am, ad, args)))
    goto error;

  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
					       &avf_irq_0_handler)))
    goto error;

  if ((error = vlib_pci_register_msix_handler (vm, h, 1, ad->n_rx_irqs,
					       &avf_irq_n_handler)))
    goto error;

  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, ad->n_rx_irqs + 1)))
    goto error;

  if ((error = vlib_pci_intr_enable (vm, h)))
    goto error;

  /* create interface */
  error = ethernet_register_interface (vnm, avf_device_class.index,
				       ad->dev_instance, ad->hwaddr,
				       &ad->hw_if_index, avf_flag_change);

  if (error)
    goto error;

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promisc mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ad->hw_if_index);
  hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
  ethernet_set_flags (vnm, ad->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;

  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
				    avf_input_node.index);

  for (i = 0; i < ad->n_rx_queues; i++)
    vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);

  if (pool_elts (am->devices) == 1)
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_START, 0);

  return;

error:
  avf_delete_if (vm, ad, /* with_barrier */ 0);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  args->error = clib_error_return (error, "pci-addr %U",
				   format_vlib_pci_addr, &args->addr);
  avf_log_err (ad, "error: %U", format_clib_error, args->error);
}
static clib_error_t *
avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (ad->flags & AVF_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      ad->flags |= AVF_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
    }
  return 0;
}
static clib_error_t *
avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
			      vnet_hw_interface_rx_mode mode)
{
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hw->dev_instance);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (rxq->int_mode == 0)
	return 0;
      if (ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
	avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_WB_ON_ITR);
      else
	avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_ENABLED);
      rxq->int_mode = 0;
    }
  else
    {
      if (rxq->int_mode == 1)
	return 0;
      if (ad->n_rx_irqs != ad->n_rx_queues)
	return clib_error_return (0, "not enough interrupt lines");
      rxq->int_mode = 1;
      avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_ENABLED);
    }

  return 0;
}
static void
avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			     u32 node_index)
{
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = avf_get_device (hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      ad->per_interface_next_index = node_index;
      return;
    }

  ad->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
}
static char *avf_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_avf_tx_func_error
#undef _
};
static void
avf_clear_hw_interface_counters (u32 instance)
{
  avf_device_t *ad = avf_get_device (instance);
  clib_memcpy_fast (&ad->last_cleared_eth_stats,
		    &ad->eth_stats, sizeof (ad->eth_stats));
}
VNET_DEVICE_CLASS (avf_device_class,) =
{
  .name = "Adaptive Virtual Function (AVF) interface",
  .clear_counters = avf_clear_hw_interface_counters,
  .format_device = format_avf_device,
  .format_device_name = format_avf_device_name,
  .admin_up_down_function = avf_interface_admin_up_down,
  .rx_mode_change_function = avf_interface_rx_mode_change,
  .rx_redirect_to_node = avf_set_interface_next_node,
  .tx_function_n_errors = AVF_TX_N_ERROR,
  .tx_function_error_strings = avf_tx_func_error_strings,
};
clib_error_t *
avf_init (vlib_main_t * vm)
{
  avf_main_t *am = &avf_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  am->log_class = vlib_log_register_class ("avf", 0);
  vlib_log_debug (am->log_class, "initialized");

  return 0;
}
VLIB_INIT_FUNCTION (avf_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */