 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>		/* avf_device_t, avf_main, register and virtchnl definitions */
#define AVF_MBOX_LEN 64
#define AVF_MBOX_BUF_SZ 512
#define AVF_RXQ_SZ 512
#define AVF_TXQ_SZ 512
#define AVF_ITR_INT 8160

#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_AVF 0x1889
#define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
#define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
static pci_device_id_t avf_pci_device_ids[] = {
  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
avf_irq_0_disable (avf_device_t * ad)
  u32 dyn_ctl0 = 0, icr0_ena = 0;

  dyn_ctl0 |= (3 << 3);		/* 11b = No ITR update */

  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
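/* Enable MSI-X vector 0 (the "misc" interrupt): unmask the admin-queue
   cause in ICR0_ENA1 and arm the vector through the dynamic control
   register, using AVF_ITR_INT as the throttling interval. */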
avf_irq_0_enable (avf_device_t * ad)
  u32 dyn_ctl0 = 0, icr0_ena = 0;

  icr0_ena |= (1 << 30);	/* [30] Admin Queue Enable */

  dyn_ctl0 |= (1 << 0);		/* [0] Interrupt Enable */
  dyn_ctl0 |= (1 << 1);		/* [1] Clear PBA */
  //dyn_ctl0 |= (3 << 3);	/* [4:3] ITR Index, 11b = No ITR update */
  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */

  avf_irq_0_disable (ad);
  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
avf_irq_n_disable (avf_device_t * ad, u8 line)
  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);

avf_irq_n_enable (avf_device_t * ad, u8 line)
  dyn_ctln |= (1 << 0);		/* [0] Interrupt Enable */
  dyn_ctln |= (1 << 1);		/* [1] Clear PBA */
  dyn_ctln |= ((AVF_ITR_INT / 2) << 5);	/* [16:5] ITR Interval in 2us steps */

  avf_irq_n_disable (ad, line);
  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
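/* Post one descriptor (plus an optional indirect data buffer) on the admin
   transmit queue, advance the ATQ tail doorbell and wait for the DD/CMP
   completion flags, returning a clib_error_t on timeout or PF error. */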
avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
  avf_main_t *am = &avf_main;
  clib_error_t *err = 0;
  avf_aq_desc_t *d, dc;

  d = &ad->atq[ad->atq_next_slot];
  clib_memcpy (d, dt, sizeof (avf_aq_desc_t));
  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
      pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
      d->addr_hi = (u32) (pa >> 32);
      d->addr_lo = (u32) pa;
      clib_memcpy (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ, data,
      d->flags |= AVF_AQ_F_BUF;

  if (ad->flags & AVF_DEVICE_F_ELOG)
    clib_memcpy (&dc, d, sizeof (avf_aq_desc_t));

  CLIB_MEMORY_BARRIER ();
  vlib_log_debug (am->log_class, "%U", format_hexdump, data, len);
  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);

  vlib_process_suspend (vm, 10e-6);

  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
      err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",

  clib_memcpy (dt, d, sizeof (avf_aq_desc_t));
  if (d->flags & AVF_AQ_F_ERR)
    return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
			      "%d]", d->opcode, d->retval);

  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
	  "datalen %d retval %d",
	.format_args = "i4i2i2i2i2i2",
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->s_flags = dc.flags;
      ed->r_flags = d->flags;
      ed->opcode = dc.opcode;
      ed->datalen = dc.datalen;
      ed->retval = d->retval;
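/* Write an rx control register indirectly through the PF (adminq opcode
   0x207), presumably because these registers cannot be written directly
   from the VF. */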
avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);

  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
	.format_args = "i4i4i4",
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
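/* Allocate and initialize one rx queue: descriptor ring in DMA-able
   physmem, a buffer for each descriptor (the ring is kept 8 slots short of
   full) and the mapped QRX_TAIL doorbell for this queue. */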
avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
  avf_main_t *am = &avf_main;
  clib_error_t *error = 0;

  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (ad->rxqs, qid);
  rxq->size = rxq_size;

  rxq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					   rxq->size * sizeof (avf_rx_desc_t),
					   2 * CLIB_CACHE_LINE_BYTES);
  memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);

  n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
    return clib_error_return (0, "buffer allocation error");

  rxq->n_enqueued = n_alloc;
  avf_rx_desc_t *d = rxq->descs;
  for (i = 0; i < n_alloc; i++)
      if (ad->flags & AVF_DEVICE_F_IOVA)
	  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
	  d->qword[0] = pointer_to_uword (b->data);
	  vlib_get_buffer_data_physical_address (vm, rxq->bufs[i]);

  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
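/* Allocate one tx queue; when there are more worker threads than queue
   pairs the queue is shared between threads and protected by a spinlock. */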
avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
  avf_main_t *am = &avf_main;
  clib_error_t *error = 0;

  if (qid >= ad->num_queue_pairs)
      qid = qid % ad->num_queue_pairs;
      txq = vec_elt_at_index (ad->txqs, qid);
      clib_spinlock_init (&txq->lock);
      ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;

  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (ad->txqs, qid);
  txq->size = txq_size;

  txq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					   txq->size * sizeof (avf_tx_desc_t),
					   2 * CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);

  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);

} virtchnl_promisc_info_t;
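/* Reset one admin receive queue descriptor so it points back at its
   preallocated mailbox buffer, ready to receive the next PF message. */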
avf_arq_slot_init (avf_device_t * ad, u16 slot)
  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
  memset (d, 0, sizeof (avf_aq_desc_t));
  d->flags = AVF_AQ_F_BUF;
  d->datalen = AVF_MBOX_BUF_SZ;
  d->addr_hi = (u32) (pa >> 32);
  d->addr_lo = (u32) pa;

avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
  avf_main_t *am = &avf_main;
  return (ad->flags & AVF_DEVICE_F_IOVA) ?
    pointer_to_uword (p) :
    vlib_physmem_virtual_to_physical (vm, am->physmem_region, p);
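/* Program the VF mailbox: base address, length and head/tail registers of
   both the admin transmit queue (ATQ) and admin receive queue (ARQ). */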
avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
  /* VF MailBox Transmit */
  memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);

  pa = avf_dma_addr (vm, ad, ad->atq);
  avf_reg_write (ad, AVF_ATQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ATQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ATQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32));	/* Base Address High */

  /* VF MailBox Receive */
  memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);

  for (i = 0; i < AVF_MBOX_LEN; i++)
    avf_arq_slot_init (ad, i);

  pa = avf_dma_addr (vm, ad, ad->arq);
  avf_reg_write (ad, AVF_ARQH, 0);	/* Head */
  avf_reg_write (ad, AVF_ARQT, 0);	/* Tail */
  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31));	/* len & ena */
  avf_reg_write (ad, AVF_ARQBAL, (u32) pa);	/* Base Address Low */
  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32));	/* Base Address High */
  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1);	/* Tail */

  ad->atq_next_slot = 0;
  ad->arq_next_slot = 0;
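/* Send one virtchnl message to the PF over the mailbox and wait for the
   matching reply; VIRTCHNL_OP_EVENT messages that arrive in the meantime
   are queued on ad->events for the process node to handle later. */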
avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
		void *in, int in_len, void *out, int out_len)
  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };

  /* suppress interrupt in the next adminq receive slot,
     as we are going to wait for the response;
     we only need interrupts when an event is received */
  d = &ad->arq[ad->arq_next_slot];
  d->flags |= AVF_AQ_F_SI;

  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))

  head = avf_get_u32 (ad->bar0, AVF_ARQH);

  if (ad->arq_next_slot == head)
      return clib_error_return (0, "timeout");
      vlib_process_suspend (vm, 10e-3);

  d = &ad->arq[ad->arq_next_slot];

  if (d->v_opcode == VIRTCHNL_OP_EVENT)
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      virtchnl_pf_event_t *e;

      if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
	  ((d->flags & AVF_AQ_F_BUF) == 0))
	return clib_error_return (0, "event message error");

      vec_add2 (ad->events, e, 1);
      clib_memcpy (e, buf, sizeof (virtchnl_pf_event_t));
      avf_arq_slot_init (ad, ad->arq_next_slot);

  if (d->v_opcode != op)
	clib_error_return (0,
			   "unexpected message received [v_opcode = %u, "
			   "expected %u, v_retval %d]", d->v_opcode, op,

      err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
			       d->v_opcode, d->v_retval);

  if (d->flags & AVF_AQ_F_BUF)
      void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
      clib_memcpy (out, buf, out_len);

  avf_arq_slot_init (ad, ad->arq_next_slot);
  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;

  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
	.format_args = "i4t4i4i4",
	.n_enum_strings = VIRTCHNL_N_OPS,
#define _(v, n) [v] = #n,
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->v_opcode_val = op;
      ed->v_retval = d->v_retval;
avf_op_version (vlib_main_t * vm, avf_device_t * ad,
		virtchnl_version_info_t * ver)
  clib_error_t *err = 0;
  virtchnl_version_info_t myver = {
    .major = VIRTCHNL_VERSION_MAJOR,
    .minor = VIRTCHNL_VERSION_MINOR,

  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
			sizeof (virtchnl_version_info_t), ver,
			sizeof (virtchnl_version_info_t));

avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
			 virtchnl_vf_resource_t * res)
  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
		VIRTCHNL_VF_OFFLOAD_RX_POLLING);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
			 sizeof (u32), res, sizeof (virtchnl_vf_resource_t));

avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
  virtchnl_rss_lut_t *rl;

  memset (msg, 0, msg_len);
  rl = (virtchnl_rss_lut_t *) msg;
  rl->vsi_id = ad->vsi_id;
  rl->lut_entries = ad->rss_lut_size;
  for (i = 0; i < ad->rss_lut_size; i++)
    rl->lut[i] = i % ad->n_rx_queues;

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,

avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
  virtchnl_rss_key_t *rk;

  memset (msg, 0, msg_len);
  rk = (virtchnl_rss_key_t *) msg;
  rk->vsi_id = ad->vsi_id;
  rk->key_len = ad->rss_key_size;
  uword seed = random_default_seed ();
  for (i = 0; i < ad->rss_key_size; i++)
    rk->key[i] = (u8) random_uword ((u32 *) & seed);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,

avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,

avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
  virtchnl_promisc_info_t pi = { 0 };

  pi.vsi_id = ad->vsi_id;

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
			 sizeof (virtchnl_promisc_info_t), 0, 0);
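/* Hand the PF the ring addresses and sizes of every configured rx/tx queue
   pair of this VSI (VIRTCHNL_OP_CONFIG_VSI_QUEUES). */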
avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
    sizeof (virtchnl_queue_pair_info_t);
  virtchnl_vsi_queue_config_info_t *ci;

  memset (msg, 0, msg_len);
  ci = (virtchnl_vsi_queue_config_info_t *) msg;
  ci->vsi_id = ad->vsi_id;
  ci->num_queue_pairs = n_qp;

  for (i = 0; i < n_qp; i++)
      virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
      virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;

      rxq->vsi_id = ad->vsi_id;
      rxq->max_pkt_size = 1518;
      if (i < vec_len (ad->rxqs))
	  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
	  rxq->ring_len = q->size;
	  rxq->databuffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
	  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
	  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);

      avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
      txq->vsi_id = ad->vsi_id;
      if (i < vec_len (ad->txqs))
	  txq->ring_len = q->size;
	  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);

  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,

avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
  int msg_len = sizeof (virtchnl_irq_map_info_t) +
    count * sizeof (virtchnl_vector_map_t);
  virtchnl_irq_map_info_t *imi;

  memset (msg, 0, msg_len);
  imi = (virtchnl_irq_map_info_t *) msg;
  imi->num_vectors = count;
  imi->vecmap[0].vector_id = 1;
  imi->vecmap[0].vsi_id = ad->vsi_id;
  imi->vecmap[0].rxq_map = 1;
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,

avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
    sizeof (virtchnl_ether_addr_list_t) +
    count * sizeof (virtchnl_ether_addr_t);
  virtchnl_ether_addr_list_t *al;

  memset (msg, 0, msg_len);
  al = (virtchnl_ether_addr_list_t *) msg;
  al->vsi_id = ad->vsi_id;
  al->num_elements = count;
  for (i = 0; i < count; i++)
    clib_memcpy (&al->list[i].addr, macs + i * 6, 6);
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,

avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
  virtchnl_queue_select_t qs = { 0 };

  qs.vsi_id = ad->vsi_id;
  for (i = 0; i < ad->n_rx_queues; i++)
      avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
      avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
			 sizeof (virtchnl_queue_select_t), 0, 0);

avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
		  virtchnl_eth_stats_t * es)
  virtchnl_queue_select_t qs = { 0 };
  qs.vsi_id = ad->vsi_id;
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
			 &qs, sizeof (virtchnl_queue_select_t),
			 es, sizeof (virtchnl_eth_stats_t));
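/* Reset the VF: post VIRTCHNL_OP_RESET_VF on the admin queue, then poll
   AVFGEN_RSTAT until the PF reports that the reset has completed. */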
avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
  avf_aq_desc_t d = { 0 };

  d.v_opcode = VIRTCHNL_OP_RESET_VF;
  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))

      vlib_process_suspend (vm, 10e-3);
      rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
      if (rstat == 2 || rstat == 3)

  return clib_error_return (0, "reset failed (timeout)");
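/* Ask the PF for a different number of queue pairs (VIRTCHNL_OP_REQUEST_QUEUES).
   When the PF grants the request it restarts the VF, so the caller has to
   re-initialize the device afterwards. */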
avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
  virtchnl_vf_res_request_t res_req = { 0 };

  res_req.num_queue_pairs = num_queue_pairs;

  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
			  sizeof (virtchnl_vf_res_request_t), &res_req,
			  sizeof (virtchnl_vf_res_request_t));

  /*
   * if the PF responds, the request failed
   * else the PF initiates a restart and avf_send_to_pf returns an error
   */
    return clib_error_return (0, "requested more than %u queue pairs",
			      res_req.num_queue_pairs);

      vlib_process_suspend (vm, 10e-3);
      rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
      if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))

  return clib_error_return (0, "reset failed (timeout)");
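/* Bring the device up: initialize the mailbox, negotiate the virtchnl
   version, fetch VF resources, configure RSS, queues and the IRQ map,
   program the MAC address and finally enable the rx/tx queues. */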
avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
		 avf_create_if_args_t * args)
  virtchnl_version_info_t ver = { 0 };
  virtchnl_vf_resource_t res = { 0 };
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  avf_adminq_init (vm, ad);

  /* request more queues only if we need them */
  if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
      /* we failed to get more queues, but still we want to proceed */
      clib_error_free (error);

  if ((error = avf_device_reset (vm, ad)))

  avf_adminq_init (vm, ad);

  if ((error = avf_op_version (vm, ad, &ver)))

  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
      ver.minor != VIRTCHNL_VERSION_MINOR)
    return clib_error_return (0, "incompatible protocol version "
			      "(remote %d.%d)", ver.major, ver.minor);

  if ((error = avf_op_get_vf_resources (vm, ad, &res)))

  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
    return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");

  ad->vsi_id = res.vsi_res[0].vsi_id;
  ad->feature_bitmap = res.vf_offload_flags;
  ad->num_queue_pairs = res.num_queue_pairs;
  ad->max_vectors = res.max_vectors;
  ad->max_mtu = res.max_mtu;
  ad->rss_key_size = res.rss_key_size;
  ad->rss_lut_size = res.rss_lut_size;

  clib_memcpy (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);

  /* Disable VLAN stripping */
  if ((error = avf_op_disable_vlan_stripping (vm, ad)))

  if ((error = avf_config_promisc_mode (vm, ad)))

  if (args->rxq_num == 0)
  else if (args->rxq_num > ad->num_queue_pairs)
      args->rxq_num = ad->num_queue_pairs;
      vlib_log_warn (am->log_class, "Requested more rx queues than "
		     "queue pairs available. Using %u rx queues.",

  for (i = 0; i < args->rxq_num; i++)
    if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))

  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((error = avf_txq_init (vm, ad, i, args->txq_size)))

  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_lut (vm, ad)))

  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
      (error = avf_op_config_rss_key (vm, ad)))

  if ((error = avf_op_config_vsi_queues (vm, ad)))

  if ((error = avf_op_config_irq_map (vm, ad)))

  avf_irq_0_enable (ad);
  for (i = 0; i < ad->n_rx_queues; i++)
    avf_irq_n_enable (ad, i);

  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))

  if ((error = avf_op_enable_queues (vm, ad, ad->n_rx_queues, 0)))

  if ((error = avf_op_enable_queues (vm, ad, 0, ad->n_tx_queues)))

  ad->flags |= AVF_DEVICE_F_INITIALIZED;
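/* Periodic/IRQ-driven housekeeping for one device: skip devices in reset or
   error state, verify both admin queues are still enabled, poll stats and
   drain any queued PF events (link changes in particular). */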
avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  virtchnl_pf_event_t *e;

  if (ad->flags & AVF_DEVICE_F_ERROR)

  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)

  ASSERT (ad->error == 0);

  /* do not process device in reset state */
  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
  if (r != VIRTCHNL_VFR_VFACTIVE)

  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
      ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);

  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
  if ((r & 0xf0000000) != (1ULL << 31))
      ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);

  avf_op_get_stats (vm, ad, &ad->eth_stats);

  vec_foreach (e, ad->events)
    if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
	int link_up = e->event_data.link_event.link_status;
	virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;

	if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
	    ad->flags |= AVF_DEVICE_F_LINK_UP;
	    flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
		      VNET_HW_INTERFACE_FLAG_LINK_UP);
	    if (speed == VIRTCHNL_LINK_SPEED_40GB)
	      flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
	    else if (speed == VIRTCHNL_LINK_SPEED_25GB)
	      flags |= VNET_HW_INTERFACE_FLAG_SPEED_25G;
	    else if (speed == VIRTCHNL_LINK_SPEED_10GB)
	      flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
	    else if (speed == VIRTCHNL_LINK_SPEED_1GB)
	      flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
	    else if (speed == VIRTCHNL_LINK_SPEED_100MB)
	      flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
	    vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
	    ad->link_speed = speed;
	else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
	    ad->flags &= ~AVF_DEVICE_F_LINK_UP;

	if (ad->flags & AVF_DEVICE_F_ELOG)
	    ELOG_TYPE_DECLARE (el) =
	      .format = "avf[%d] link change: link_status %d "
	      .format_args = "i4i1i1",
	    ed = ELOG_DATA (&vm->elog_main, el);
	    ed->dev_instance = ad->dev_instance;
	    ed->link_status = link_up;
	    ed->link_speed = speed;

	if (ad->flags & AVF_DEVICE_F_ELOG)
	    ELOG_TYPE_DECLARE (el) =
	      .format = "avf[%d] unknown event: event %d severity %d",
	      .format_args = "i4i4i1i1",
	    ed = ELOG_DATA (&vm->elog_main, el);
	    ed->dev_instance = ad->dev_instance;
	    ed->event = e->event;
	    ed->severity = e->severity;

  vec_reset_length (ad->events);

  ad->flags |= AVF_DEVICE_F_ERROR;
  ASSERT (ad->error != 0);
  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);

avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
  avf_main_t *am = &avf_main;
  vlib_log_warn (am->log_class, "TODO");
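/* Main process node: wakes up every 5 seconds (or on a start/stop or
   admin-queue interrupt event) and runs avf_process_one_device for every
   device in the pool. */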
avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
  avf_main_t *am = &avf_main;
  uword *event_data = 0, event_type;
  int enabled = 0, irq;
  f64 last_run_duration = 0;
  f64 last_periodic_time = 0;

	vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
	vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);
      vec_reset_length (event_data);

	  last_periodic_time = vlib_time_now (vm);
	case AVF_PROCESS_EVENT_START:
	case AVF_PROCESS_EVENT_STOP:
	case AVF_PROCESS_EVENT_AQ_INT:

      pool_foreach (ad, am->devices,
	avf_process_one_device (vm, ad, irq);

      last_run_duration = vlib_time_now (vm) - last_periodic_time;

VLIB_REGISTER_NODE (avf_process_node, static) = {
  .function = avf_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "avf-process",
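/* MSI-X vector 0 handler: read and re-arm ICR0, then signal the process
   node when the admin-queue cause (bit 30) fired. */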
avf_irq_0_handler (vlib_pci_dev_handle_t h, u16 line)
  vlib_main_t *vm = vlib_get_main ();
  avf_main_t *am = &avf_main;
  uword pd = vlib_pci_get_private_data (h);
  avf_device_t *ad = pool_elt_at_index (am->devices, pd);

  icr0 = avf_reg_read (ad, AVFINT_ICR0);

  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] irq 0: icr0 0x%x",
	.format_args = "i4i4",
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;

  avf_irq_0_enable (ad);

  /* bit 30 - Send/Receive Admin queue interrupt indication */
  if (icr0 & (1 << 30))
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_AQ_INT, 0);
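/* Per-queue MSI-X handler: mark the corresponding rx queue as having a
   pending interrupt (when it runs in interrupt mode) and re-arm the
   interrupt lines. */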
avf_irq_n_handler (vlib_pci_dev_handle_t h, u16 line)
  vnet_main_t *vnm = vnet_get_main ();
  vlib_main_t *vm = vlib_get_main ();
  avf_main_t *am = &avf_main;
  uword pd = vlib_pci_get_private_data (h);
  avf_device_t *ad = pool_elt_at_index (am->devices, pd);

  if (ad->flags & AVF_DEVICE_F_ELOG)
      ELOG_TYPE_DECLARE (el) =
	.format = "avf[%d] irq %d: received",
	.format_args = "i4i2",
      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;

  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
    vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
  for (i = 0; i < vec_len (ad->rxqs); i++)
    avf_irq_n_enable (ad, i);
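/* Tear an interface down: remove it from vnet, close the PCI device and
   free every descriptor ring, mailbox buffer and data buffer we own. */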
avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;

  if (ad->hw_if_index)
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
      ethernet_delete_interface (vnm, ad->hw_if_index);

  vlib_pci_device_close (ad->pci_dev_handle);

  vlib_physmem_free (vm, am->physmem_region, ad->atq);
  vlib_physmem_free (vm, am->physmem_region, ad->arq);
  vlib_physmem_free (vm, am->physmem_region, ad->atq_bufs);
  vlib_physmem_free (vm, am->physmem_region, ad->arq_bufs);

  vec_foreach_index (i, ad->rxqs)
      avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
      vlib_physmem_free (vm, am->physmem_region, (void *) rxq->descs);
      if (rxq->n_enqueued)
	vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
      vec_free (rxq->bufs);
  vec_free (ad->rxqs);

  vec_foreach_index (i, ad->txqs)
      avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
      vlib_physmem_free (vm, am->physmem_region, (void *) txq->descs);
      if (txq->n_enqueued)
	  u16 first = (txq->next - txq->n_enqueued) & (txq->size - 1);
	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
      vec_free (txq->bufs);
  vec_free (ad->txqs);

  clib_error_free (ad->error);
  memset (ad, 0, sizeof (*ad));
  pool_put (am->devices, ad);
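/* "create interface avf" entry point: validate the arguments, open and map
   the PCI device, allocate the mailbox rings, run avf_device_init and
   register the resulting ethernet interface with vnet. */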
avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  vlib_pci_dev_handle_t h;
  clib_error_t *error = 0;

  /* check input args */
  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;

  if ((args->rxq_size & (args->rxq_size - 1))
      || (args->txq_size & (args->txq_size - 1)))
      args->rv = VNET_API_ERROR_INVALID_VALUE;
	clib_error_return (error, "queue size must be a power of two");

  pool_get (am->devices, ad);
  ad->dev_instance = ad - am->devices;
  ad->per_interface_next_index = ~0;

  if (args->enable_elog)
    ad->flags |= AVF_DEVICE_F_ELOG;

  if ((error = vlib_pci_device_open (&args->addr, avf_pci_device_ids, &h)))
      pool_put (am->devices, ad);
      args->rv = VNET_API_ERROR_INVALID_INTERFACE;
	clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,

  ad->pci_dev_handle = h;

  vlib_pci_set_private_data (h, ad->dev_instance);

  if ((error = vlib_pci_bus_master_enable (h)))

  if ((error = vlib_pci_map_region (h, 0, &ad->bar0)))

  if ((error = vlib_pci_register_msix_handler (h, 0, 1, &avf_irq_0_handler)))

  if ((error = vlib_pci_register_msix_handler (h, 1, 1, &avf_irq_n_handler)))

  if ((error = vlib_pci_enable_msix_irq (h, 0, 2)))

  if (am->physmem_region_alloc == 0)
      u32 flags = VLIB_PHYSMEM_F_INIT_MHEAP | VLIB_PHYSMEM_F_HUGETLB;
      error = vlib_physmem_region_alloc (vm, "avf descriptors", 4 << 20, 0,
					 flags, &am->physmem_region);
      am->physmem_region_alloc = 1;

  ad->atq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,

  ad->arq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,

  ad->atq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					     AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,

  ad->arq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					     AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,

  if ((error = vlib_pci_intr_enable (h)))

  ad->flags |= AVF_DEVICE_F_IOVA;

  if ((error = avf_device_init (vm, am, ad, args)))

  /* create interface */
  error = ethernet_register_interface (vnm, avf_device_class.index,
				       ad->dev_instance, ad->hwaddr,
				       &ad->hw_if_index, avf_flag_change);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;

  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
				    avf_input_node.index);

  for (i = 0; i < ad->n_rx_queues; i++)
    vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);

  if (pool_elts (am->devices) == 1)
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_START, 0);

  avf_delete_if (vm, ad);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  args->error = clib_error_return (error, "pci-addr %U",
				   format_vlib_pci_addr, &args->addr);
  vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
static clib_error_t *
avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  avf_main_t *am = &avf_main;
  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (ad->flags & AVF_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

      vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      ad->flags |= AVF_DEVICE_F_ADMIN_UP;

      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;

static clib_error_t *
avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
			      vnet_hw_interface_rx_mode mode)
  avf_main_t *am = &avf_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)

avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
  avf_main_t *am = &avf_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
      ad->per_interface_next_index = node_index;

  ad->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);

static char *avf_tx_func_error_strings[] = {
  foreach_avf_tx_func_error

VNET_DEVICE_CLASS (avf_device_class,) =
  .name = "Adaptive Virtual Function (AVF) interface",
  .format_device = format_avf_device,
  .format_device_name = format_avf_device_name,
  .admin_up_down_function = avf_interface_admin_up_down,
  .rx_mode_change_function = avf_interface_rx_mode_change,
  .rx_redirect_to_node = avf_set_interface_next_node,
  .tx_function_n_errors = AVF_TX_N_ERROR,
  .tx_function_error_strings = avf_tx_func_error_strings,
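/* Plugin init: allocate per-thread data and build the packet-type lookup
   table that maps the 8-bit hardware ptype to a next node, buffer flags
   and buffer advance. */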
avf_init (vlib_main_t * vm)
  avf_main_t *am = &avf_main;
  clib_error_t *error;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  if ((error = vlib_call_init_function (vm, pci_bus_init)))

  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  /* initialize ptype based lookup table */
  vec_validate_aligned (am->ptypes, 255, CLIB_CACHE_LINE_BYTES);

  vec_foreach_index (i, am->ptypes)
    avf_ptype_t *p = vec_elt_at_index (am->ptypes, i);
    if ((i >= 22) && (i <= 87))
	p->next_node = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
	p->flags = VNET_BUFFER_F_IS_IP4;
    else if ((i >= 88) && (i <= 153))
	p->next_node = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
	p->flags = VNET_BUFFER_F_IS_IP6;
      p->next_node = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
    p->buffer_advance = device_input_next_node_advance[p->next_node];
    p->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

  am->log_class = vlib_log_register_class ("avf_plugin", 0);
  vlib_log_debug (am->log_class, "initialized");

VLIB_INIT_FUNCTION (avf_init);
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")