2 *------------------------------------------------------------------
3 * Copyright (c) 2023 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <idpf/idpf.h>
19 #include <vpp/app/version.h>
20 #include <vnet/plugin/plugin.h>
22 #define IDPF_RXQ_SZ 512
23 #define IDPF_TXQ_SZ 512
25 #define PCI_VENDOR_ID_INTEL 0x8086
26 #define PCI_DEVICE_ID_INTEL_IDPF_PF 0x1452
27 #define PCI_DEVICE_ID_INTEL_IDPF_VF 0x1889
29 VLIB_REGISTER_LOG_CLASS (idpf_log) = {
33 VLIB_REGISTER_LOG_CLASS (idpf_stats_log) = {
35 .subclass_name = "stats",
38 idpf_main_t idpf_main;
39 void idpf_delete_if (vlib_main_t *vm, idpf_device_t *id, int with_barrier);
41 static pci_device_id_t idpf_pci_device_ids[] = {
42 { .vendor_id = PCI_VENDOR_ID_INTEL,
43 .device_id = PCI_DEVICE_ID_INTEL_IDPF_PF },
44 { .vendor_id = PCI_VENDOR_ID_INTEL,
45 .device_id = PCI_DEVICE_ID_INTEL_IDPF_VF },
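/*
 * Drain the mailbox send queue: poll idpf_ctlq_clean_sq a bounded number
 * of times and release the DMA payload and message wrapper of every
 * descriptor that has completed.
 */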
50 idpf_vc_clean (vlib_main_t *vm, idpf_device_t *id)
52 idpf_ctlq_msg_t *q_msg[IDPF_CTLQ_LEN];
53 uint16_t num_q_msg = IDPF_CTLQ_LEN;
54 idpf_dma_mem_t *dma_mem;
58 for (i = 0; i < 10; i++)
60 err = idpf_ctlq_clean_sq (id->asq, &num_q_msg, q_msg);
61 vlib_process_suspend (vm, 0.02);
68 /* Empty queue is not an error */
69 for (i = 0; i < num_q_msg; i++)
71 dma_mem = q_msg[i]->ctx.indirect.payload;
73 idpf_free_dma_mem (id, dma_mem);
74 clib_mem_free (q_msg[i]);
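/*
 * Read a single message from the mailbox receive queue (ARQ) and classify
 * it: a VIRTCHNL2_OP_EVENT becomes IDPF_MSG_SYS, a reply to the pending
 * command becomes IDPF_MSG_CMD, anything else is an error. The receive
 * buffer is posted back to the ARQ before returning.
 */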
80 static idpf_vc_result_t
81 idpf_read_msg_from_cp (idpf_device_t *id, u16 buf_len, u8 *buf)
83 idpf_ctlq_msg_t ctlq_msg;
84 idpf_dma_mem_t *dma_mem = NULL;
85 idpf_vc_result_t result = IDPF_MSG_NON;
90 ret = idpf_ctlq_recv (id->arq, &pending, &ctlq_msg);
93 idpf_log_debug (id, "Can't read msg from AQ");
95 result = IDPF_MSG_ERR;
99 clib_memcpy_fast (buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
101 opcode = ctlq_msg.cookie.mbx.chnl_opcode;
102 id->cmd_retval = ctlq_msg.cookie.mbx.chnl_retval;
104 idpf_log_debug (id, "CQ from CP carries opcode %u, retval %d", opcode,
107 if (opcode == VIRTCHNL2_OP_EVENT)
109 virtchnl2_event_t *ve =
110 (virtchnl2_event_t *) ctlq_msg.ctx.indirect.payload->va;
112 result = IDPF_MSG_SYS;
115 case VIRTCHNL2_EVENT_LINK_CHANGE:
118 idpf_log_err (id, "%s: Unknown event %d from CP", __func__,
125 /* async reply to a command previously issued to the CP */
126 result = IDPF_MSG_CMD;
127 if (opcode != id->pend_cmd)
129 idpf_log_warn (id, "command mismatch, expected %u, got %u",
130 id->pend_cmd, opcode);
131 result = IDPF_MSG_ERR;
135 if (ctlq_msg.data_len != 0)
136 dma_mem = ctlq_msg.ctx.indirect.payload;
140 ret = idpf_ctlq_post_rx_buffs (id, id->arq, &pending, &dma_mem);
141 if (ret != 0 && dma_mem != NULL)
142 idpf_free_dma_mem (id, dma_mem);
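/*
 * Post one virtchnl2 message to the mailbox send queue (ASQ): the request
 * is copied into a freshly allocated DMA buffer that is attached to a
 * control queue descriptor carrying the opcode in its cookie.
 */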
148 idpf_send_vc_msg (vlib_main_t *vm, idpf_device_t *id, virtchnl2_op_t op,
151 idpf_ctlq_msg_t *ctlq_msg;
152 idpf_dma_mem_t *dma_mem;
155 error = idpf_vc_clean (vm, id);
159 ctlq_msg = clib_mem_alloc (sizeof (idpf_ctlq_msg_t));
160 if (ctlq_msg == NULL)
162 clib_memset (ctlq_msg, 0, sizeof (idpf_ctlq_msg_t));
164 dma_mem = clib_mem_alloc (sizeof (idpf_dma_mem_t));
167 clib_memset (dma_mem, 0, sizeof (idpf_dma_mem_t));
169 dma_mem->va = idpf_alloc_dma_mem (vm, id, dma_mem, IDPF_DFLT_MBX_BUF_SIZE);
170 if (dma_mem->va == NULL)
172 clib_mem_free (dma_mem);
176 clib_memcpy (dma_mem->va, in, in_len);
178 ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
179 ctlq_msg->func_id = 0;
180 ctlq_msg->data_len = in_len;
181 ctlq_msg->cookie.mbx.chnl_opcode = op;
182 ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL2_STATUS_SUCCESS;
183 ctlq_msg->ctx.indirect.payload = dma_mem;
185 error = idpf_ctlq_send (id, id->asq, 1, ctlq_msg);
192 idpf_free_dma_mem (id, dma_mem);
194 clib_mem_free (ctlq_msg);
196 return clib_error_return (0, "idpf send vc msg to PF failed");
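/*
 * Poll the mailbox for the reply to a specific opcode, suspending the
 * calling process between attempts, and give up after
 * IDPF_SEND_TO_PF_MAX_TRY_TIMES tries or on a non-success retval.
 */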
200 idpf_read_one_msg (vlib_main_t *vm, idpf_device_t *id, u32 ops, u8 *buf,
204 f64 suspend_time = IDPF_SEND_TO_PF_SUSPEND_TIME;
208 ret = idpf_read_msg_from_cp (id, buf_len, buf);
209 if (ret == IDPF_MSG_CMD)
211 vlib_process_suspend (vm, suspend_time);
213 while (i++ < IDPF_SEND_TO_PF_MAX_TRY_TIMES);
214 if (i >= IDPF_SEND_TO_PF_MAX_TRY_TIMES ||
215 id->cmd_retval != VIRTCHNL2_STATUS_SUCCESS)
216 return clib_error_return (0, "idpf read one msg failed");
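/*
 * Issue a virtchnl2 command to the control plane. Only one command may be
 * pending at a time (tracked in id->pend_cmd); for the synchronous opcodes
 * listed below the reply is polled from the mailbox before returning.
 */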
222 idpf_execute_vc_cmd (vlib_main_t *vm, idpf_device_t *id, idpf_cmd_info_t *args)
224 clib_error_t *error = 0;
225 f64 suspend_time = IDPF_SEND_TO_PF_SUSPEND_TIME;
228 if (id->pend_cmd == VIRTCHNL2_OP_UNKNOWN)
229 id->pend_cmd = args->ops;
231 return clib_error_return (0, "There is an incomplete cmd %d", id->pend_cmd);
233 if ((error = idpf_send_vc_msg (vm, id, args->ops, args->in_args,
234 args->in_args_size)))
239 case VIRTCHNL2_OP_VERSION:
240 case VIRTCHNL2_OP_GET_CAPS:
241 case VIRTCHNL2_OP_CREATE_VPORT:
242 case VIRTCHNL2_OP_DESTROY_VPORT:
243 case VIRTCHNL2_OP_SET_RSS_KEY:
244 case VIRTCHNL2_OP_SET_RSS_LUT:
245 case VIRTCHNL2_OP_SET_RSS_HASH:
246 case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
247 case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
248 case VIRTCHNL2_OP_ENABLE_QUEUES:
249 case VIRTCHNL2_OP_DISABLE_QUEUES:
250 case VIRTCHNL2_OP_ENABLE_VPORT:
251 case VIRTCHNL2_OP_DISABLE_VPORT:
252 case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
253 case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
254 case VIRTCHNL2_OP_ALLOC_VECTORS:
255 case VIRTCHNL2_OP_DEALLOC_VECTORS:
256 case VIRTCHNL2_OP_GET_STATS:
257 /* these virtchnl ops are synchronous, so poll the mailbox for the response */
258 error = idpf_read_one_msg (vm, id, args->ops, args->out_buffer,
261 return clib_error_return (0, "idpf read vc message from PF failed");
264 case VIRTCHNL2_OP_GET_PTYPE_INFO:
269 if (id->pend_cmd == VIRTCHNL2_OP_UNKNOWN)
271 vlib_process_suspend (vm, suspend_time);
272 /* If no message was read, or a system event was read, keep polling */
274 while (i++ < IDPF_SEND_TO_PF_MAX_TRY_TIMES);
275 /* If no response was received or the command failed, return an error */
276 if (i >= IDPF_SEND_TO_PF_MAX_TRY_TIMES ||
277 id->cmd_retval != VIRTCHNL2_STATUS_SUCCESS)
278 return clib_error_return (
279 0, "No response or return failure (%d) for cmd %d", id->cmd_retval,
288 idpf_dma_addr (vlib_main_t *vm, idpf_device_t *id, void *p)
290 return (id->flags & IDPF_DEVICE_F_VA_DMA) ? pointer_to_uword (p) :
291 vlib_physmem_get_pa (vm, p);
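/*
 * Build a virtchnl2_queue_vector_maps_t covering every RX queue of the
 * vport from vport->qv_map and send VIRTCHNL2_OP_MAP_QUEUE_VECTOR or
 * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR accordingly.
 */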
295 idpf_vc_config_irq_map_unmap (vlib_main_t *vm, idpf_device_t *id,
296 idpf_vport_t *vport, bool map)
298 virtchnl2_queue_vector_maps_t *map_info;
299 virtchnl2_queue_vector_t *vecmap;
300 u16 nb_rxq = vport->id->n_rx_queues;
301 idpf_cmd_info_t args;
305 len = sizeof (virtchnl2_queue_vector_maps_t) +
306 (nb_rxq - 1) * sizeof (virtchnl2_queue_vector_t);
308 map_info = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
309 clib_memset (map_info, 0, len);
311 map_info->vport_id = vport->vport_id;
312 map_info->num_qv_maps = nb_rxq;
313 for (i = 0; i < nb_rxq; i++)
315 vecmap = &map_info->qv_maps[i];
316 vecmap->queue_id = vport->qv_map[i].queue_id;
317 vecmap->vector_id = vport->qv_map[i].vector_id;
318 vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
319 vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
323 map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR : VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
324 args.in_args = (u8 *) map_info;
325 args.in_args_size = len;
326 args.out_buffer = id->mbx_resp;
327 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
328 error = idpf_execute_vc_cmd (vm, id, &args);
330 return clib_error_return (
331 0, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
332 map ? "MAP" : "UNMAP");
334 clib_mem_free (map_info);
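/*
 * Program the dynamic ITR control register advertised in the received
 * vector chunks (write-back on ITR) and map every RX queue of the vport
 * to the first allocated vector.
 */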
339 idpf_config_rx_queues_irqs (vlib_main_t *vm, idpf_device_t *id,
342 virtchnl2_queue_vector_t *qv_map;
343 clib_error_t *error = 0;
344 u32 dynctl_reg_start;
346 u32 dynctl_val, itrn_val;
349 qv_map = clib_mem_alloc_aligned (id->n_rx_queues *
350 sizeof (virtchnl2_queue_vector_t),
351 CLIB_CACHE_LINE_BYTES);
352 clib_memset (qv_map, 0, id->n_rx_queues * sizeof (virtchnl2_queue_vector_t));
354 dynctl_reg_start = vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
355 itrn_reg_start = vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
356 dynctl_val = idpf_reg_read (id, dynctl_reg_start);
357 idpf_log_debug (id, "Value of dynctl_reg_start is 0x%x", dynctl_val);
358 itrn_val = idpf_reg_read (id, itrn_reg_start);
359 idpf_log_debug (id, "Value of itrn_reg_start is 0x%x", itrn_val);
362 idpf_reg_write (id, dynctl_reg_start,
363 VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |
364 PF_GLINT_DYN_CTL_WB_ON_ITR_M |
365 itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S);
367 idpf_reg_write (id, dynctl_reg_start,
368 VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |
369 PF_GLINT_DYN_CTL_WB_ON_ITR_M |
370 IDPF_DFLT_INTERVAL << PF_GLINT_DYN_CTL_INTERVAL_S);
372 for (i = 0; i < id->n_rx_queues; i++)
374 /* map all queues to the same vector */
375 qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
376 qv_map[i].vector_id =
377 vport->recv_vectors->vchunks.vchunks->start_vector_id;
379 vport->qv_map = qv_map;
381 if ((error = idpf_vc_config_irq_map_unmap (vm, id, vport, true)))
383 idpf_log_err (id, "config interrupt mapping failed");
384 goto config_irq_map_err;
390 clib_mem_free (vport->qv_map);
391 vport->qv_map = NULL;
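/*
 * Set up one RX buffer queue of the split-queue model: allocate and
 * DMA-map the descriptor ring, locate the queue tail register and pre-fill
 * the ring with buffers from the per-NUMA default pool.
 */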
397 idpf_rx_split_bufq_setup (vlib_main_t *vm, idpf_device_t *id,
398 idpf_vport_t *vport, idpf_rxq_t *bufq, u16 qid,
404 bufq->size = rxq_size;
406 bufq->descs = vlib_physmem_alloc_aligned_on_numa (
407 vm, bufq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
410 bufq->buffer_pool_index =
411 vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
413 if ((err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) bufq->descs)))
416 clib_memset ((void *) bufq->descs, 0,
417 bufq->size * sizeof (virtchnl2_rx_desc_t));
418 vec_validate_aligned (bufq->bufs, bufq->size, CLIB_CACHE_LINE_BYTES);
419 bufq->qrx_tail = id->bar0 + (vport->chunks_info.rx_buf_qtail_start +
420 qid * vport->chunks_info.rx_buf_qtail_spacing);
422 n_alloc = vlib_buffer_alloc_from_pool (vm, bufq->bufs, bufq->size - 8,
423 bufq->buffer_pool_index);
425 return clib_error_return (0, "buffer allocation error");
427 bufq->n_enqueued = n_alloc;
428 virtchnl2_rx_desc_t *d = bufq->descs;
429 for (i = 0; i < n_alloc; i++)
431 vlib_buffer_t *b = vlib_get_buffer (vm, bufq->bufs[i]);
432 if (id->flags & IDPF_DEVICE_F_VA_DMA)
433 d->qword[0] = vlib_buffer_get_va (b);
435 d->qword[0] = vlib_buffer_get_pa (vm, b);
443 idpf_split_rxq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
444 u16 qid, u16 rxq_size)
450 vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
451 rxq = vec_elt_at_index (vport->rxqs, qid);
452 rxq->size = rxq_size;
454 rxq->descs = vlib_physmem_alloc_aligned_on_numa (
455 vm, rxq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
458 rxq->buffer_pool_index =
459 vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
462 return vlib_physmem_last_error (vm);
464 if ((err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) rxq->descs)))
467 clib_memset ((void *) rxq->descs, 0,
468 rxq->size * sizeof (virtchnl2_rx_desc_t));
469 vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
470 rxq->qrx_tail = id->bar0 + (vport->chunks_info.rx_qtail_start +
471 qid * vport->chunks_info.rx_qtail_spacing);
473 n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
474 rxq->buffer_pool_index);
477 return clib_error_return (0, "buffer allocation error");
479 rxq->n_enqueued = n_alloc;
480 virtchnl2_rx_desc_t *d = rxq->descs;
481 for (i = 0; i < n_alloc; i++)
483 vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
484 if (id->flags & IDPF_DEVICE_F_VA_DMA)
485 d->qword[0] = vlib_buffer_get_va (b);
487 d->qword[0] = vlib_buffer_get_pa (vm, b);
492 idpf_rx_split_bufq_setup (vm, id, vport, rxq->bufq1, 2 * qid, rxq_size);
496 idpf_rx_split_bufq_setup (vm, id, vport, rxq->bufq2, 2 * qid + 1, rxq_size);
504 idpf_single_rxq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
505 u16 qid, u16 rxq_size)
511 vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
512 rxq = vec_elt_at_index (vport->rxqs, qid);
513 rxq->queue_index = vport->chunks_info.rx_start_qid + qid;
514 rxq->size = rxq_size;
516 rxq->descs = vlib_physmem_alloc_aligned_on_numa (
517 vm, rxq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
520 rxq->buffer_pool_index =
521 vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
524 return vlib_physmem_last_error (vm);
526 err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) rxq->descs);
530 clib_memset ((void *) rxq->descs, 0,
531 rxq->size * sizeof (virtchnl2_rx_desc_t));
532 vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
533 rxq->qrx_tail = id->bar0 + (vport->chunks_info.rx_qtail_start +
534 qid * vport->chunks_info.rx_qtail_spacing);
536 n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
537 rxq->buffer_pool_index);
540 return clib_error_return (0, "buffer allocation error");
542 rxq->n_enqueued = n_alloc;
543 virtchnl2_rx_desc_t *d = rxq->descs;
544 for (i = 0; i < n_alloc; i++)
546 vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
547 if (id->flags & IDPF_DEVICE_F_VA_DMA)
548 d->qword[0] = vlib_buffer_get_va (b);
550 d->qword[0] = vlib_buffer_get_pa (vm, b);
558 idpf_rx_queue_setup (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
559 u16 qid, u16 rxq_size)
561 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
562 return idpf_single_rxq_init (vm, id, vport, qid, rxq_size);
564 return idpf_split_rxq_init (vm, id, vport, qid, rxq_size);
568 idpf_tx_split_complq_setup (vlib_main_t *vm, idpf_device_t *id,
569 idpf_vport_t *vport, idpf_txq_t *complq, u16 qid,
574 u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
576 complq->size = txq_size;
578 clib_spinlock_init (&complq->lock);
580 n = (complq->size / 510) + 1;
581 vec_validate_aligned (complq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
583 if (!vlib_buffer_alloc_from_pool (vm, complq->ph_bufs, n, bpi))
584 return clib_error_return (0, "buffer allocation error");
586 complq->descs = vlib_physmem_alloc_aligned_on_numa (
587 vm, complq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
589 if (complq->descs == 0)
590 return vlib_physmem_last_error (vm);
593 vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) complq->descs)))
596 vec_validate_aligned (complq->bufs, complq->size, CLIB_CACHE_LINE_BYTES);
598 id->bar0 + (vport->chunks_info.tx_compl_qtail_start +
599 qid * vport->chunks_info.tx_compl_qtail_spacing);
601 /* initialize ring of pending RS slots */
602 clib_ring_new_aligned (complq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
604 vec_validate_aligned (complq->tmp_descs, complq->size,
605 CLIB_CACHE_LINE_BYTES);
606 vec_validate_aligned (complq->tmp_bufs, complq->size, CLIB_CACHE_LINE_BYTES);
612 idpf_split_txq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
613 u16 qid, u16 txq_size)
618 u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
620 vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
621 txq = vec_elt_at_index (vport->txqs, qid);
622 txq->size = txq_size;
624 clib_spinlock_init (&txq->lock);
626 n = (txq->size / 510) + 1;
627 vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
629 if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
630 return clib_error_return (0, "buffer allocation error");
632 txq->descs = vlib_physmem_alloc_aligned_on_numa (
633 vm, txq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
636 return vlib_physmem_last_error (vm);
638 err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) txq->descs);
642 vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
643 txq->qtx_tail = id->bar0 + (vport->chunks_info.tx_qtail_start +
644 qid * vport->chunks_info.tx_qtail_spacing);
646 /* initialize ring of pending RS slots */
647 clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
649 vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
650 vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
652 complq_qid = vport->chunks_info.tx_compl_start_qid + qid;
653 err = idpf_tx_split_complq_setup (vm, id, vport, txq->complq, complq_qid,
662 idpf_single_txq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
663 u16 qid, u16 txq_size)
668 u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
670 vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
671 txq = vec_elt_at_index (vport->txqs, qid);
672 txq->queue_index = vport->chunks_info.tx_start_qid + qid;
673 txq->size = txq_size;
675 clib_spinlock_init (&txq->lock);
677 n = (txq->size / 510) + 1;
678 vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
680 if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
681 return clib_error_return (0, "buffer allocation error");
683 txq->descs = vlib_physmem_alloc_aligned_on_numa (
684 vm, txq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
687 return vlib_physmem_last_error (vm);
689 err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) txq->descs);
693 vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
694 txq->qtx_tail = id->bar0 + (vport->chunks_info.tx_qtail_start +
695 qid * vport->chunks_info.tx_qtail_spacing);
697 /* initialize ring of pending RS slots */
698 clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
700 vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
701 vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
707 idpf_tx_queue_setup (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
708 u16 qid, u16 txq_size)
710 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
711 return idpf_single_txq_init (vm, id, vport, qid, txq_size);
713 return idpf_split_txq_init (vm, id, vport, qid, txq_size);
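/*
 * Describe one TX queue (plus its completion queue in the split model) to
 * the control plane with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
 */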
717 idpf_vc_config_txq (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
721 virtchnl2_config_tx_queues_t *vc_txqs = NULL;
722 virtchnl2_txq_info_t *txq_info;
723 idpf_cmd_info_t args;
728 vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
729 txq = vec_elt_at_index (vport->txqs, qid);
731 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
732 num_qs = IDPF_TXQ_PER_GRP;
734 num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
736 size = sizeof (*vc_txqs) + (num_qs - 1) * sizeof (virtchnl2_txq_info_t);
737 vc_txqs = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
738 clib_memset (vc_txqs, 0, size);
740 vc_txqs->vport_id = vport->vport_id;
741 vc_txqs->num_qinfo = num_qs;
743 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
745 txq_info = &vc_txqs->qinfo[0];
746 txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) txq->descs);
747 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
748 txq_info->queue_id = txq->queue_index;
749 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
750 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
751 txq_info->ring_len = txq->size;
756 txq_info = &vc_txqs->qinfo[0];
757 txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) txq->descs);
758 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
759 txq_info->queue_id = txq->queue_index;
760 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
761 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
762 txq_info->ring_len = txq->size;
763 txq_info->tx_compl_queue_id = txq->complq->queue_index;
764 txq_info->relative_queue_id = txq_info->queue_id;
766 /* tx completion queue info */
767 idpf_txq_t *complq = txq->complq;
768 txq_info = &vc_txqs->qinfo[1];
769 txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) complq->descs);
770 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
771 txq_info->queue_id = complq->queue_index;
772 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
773 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
774 txq_info->ring_len = complq->size;
777 clib_memset (&args, 0, sizeof (args));
778 args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
779 args.in_args = (u8 *) vc_txqs;
780 args.in_args_size = size;
781 args.out_buffer = id->mbx_resp;
782 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
784 error = idpf_execute_vc_cmd (vm, id, &args);
785 clib_mem_free (vc_txqs);
787 return clib_error_return (
788 0, "Failed to execute command VIRTCHNL2_OP_CONFIG_TX_QUEUES");
794 idpf_vc_config_rxq (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
798 virtchnl2_config_rx_queues_t *vc_rxqs = NULL;
799 virtchnl2_rxq_info_t *rxq_info;
800 idpf_cmd_info_t args;
805 vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
806 rxq = vec_elt_at_index (vport->rxqs, qid);
808 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
809 num_qs = IDPF_RXQ_PER_GRP;
811 num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
813 size = sizeof (*vc_rxqs) + (num_qs - 1) * sizeof (virtchnl2_rxq_info_t);
814 vc_rxqs = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
815 clib_memset (vc_rxqs, 0, size);
817 vc_rxqs->vport_id = vport->vport_id;
818 vc_rxqs->num_qinfo = num_qs;
820 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
822 rxq_info = &vc_rxqs->qinfo[0];
823 rxq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) rxq->descs);
824 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
825 rxq_info->queue_id = rxq->queue_index;
826 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
827 rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
828 rxq_info->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
830 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
831 rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
833 rxq_info->ring_len = rxq->size;
838 rxq_info = &vc_rxqs->qinfo[0];
839 rxq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) rxq->descs);
840 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
841 rxq_info->queue_id = rxq->queue_index;
842 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
843 rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
844 rxq_info->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
846 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
847 rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
849 rxq_info->ring_len = rxq->size;
850 rxq_info->rx_bufq1_id = rxq->bufq1->queue_index;
851 rxq_info->rx_bufq2_id = rxq->bufq2->queue_index;
852 rxq_info->rx_buffer_low_watermark = 64;
855 for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++)
857 idpf_rxq_t *bufq = (i == 1 ? rxq->bufq1 : rxq->bufq2);
858 rxq_info = &vc_rxqs->qinfo[i];
859 rxq_info->dma_ring_addr =
860 idpf_dma_addr (vm, id, (void *) bufq->descs);
861 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
862 rxq_info->queue_id = bufq->queue_index;
863 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
864 rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
865 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
866 rxq_info->ring_len = bufq->size;
868 rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
869 rxq_info->rx_buffer_low_watermark = 64;
873 clib_memset (&args, 0, sizeof (args));
874 args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
875 args.in_args = (u8 *) vc_rxqs;
876 args.in_args_size = size;
877 args.out_buffer = id->mbx_resp;
878 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
880 error = idpf_execute_vc_cmd (vm, id, &args);
881 clib_mem_free (vc_rxqs);
883 return clib_error_return (
884 0, "Failed to execute command VIRTCHNL2_OP_CONFIG_RX_QUEUES");
890 idpf_alloc_vectors (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
891 uint16_t num_vectors)
893 virtchnl2_alloc_vectors_t *alloc_vec;
894 idpf_cmd_info_t args;
898 len = sizeof (virtchnl2_alloc_vectors_t) +
899 (num_vectors - 1) * sizeof (virtchnl2_vector_chunk_t);
900 alloc_vec = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
901 clib_memset (alloc_vec, 0, len);
903 alloc_vec->num_vectors = num_vectors;
905 args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
906 args.in_args = (u8 *) alloc_vec;
907 args.in_args_size = sizeof (virtchnl2_alloc_vectors_t);
908 args.out_buffer = id->mbx_resp;
909 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
910 error = idpf_execute_vc_cmd (vm, id, &args);
912 return clib_error_return (
913 0, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
915 if (vport->recv_vectors == NULL)
917 vport->recv_vectors =
918 clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
919 clib_memset (vport->recv_vectors, 0, len);
922 clib_memcpy (vport->recv_vectors, args.out_buffer, len);
923 clib_mem_free (alloc_vec);
928 idpf_vc_ena_dis_one_queue (vlib_main_t *vm, idpf_device_t *id,
929 idpf_vport_t *vport, u16 qid, u32 type, bool on)
931 virtchnl2_del_ena_dis_queues_t *queue_select;
932 virtchnl2_queue_chunk_t *queue_chunk;
933 idpf_cmd_info_t args;
934 clib_error_t *error = 0;
937 len = sizeof (virtchnl2_del_ena_dis_queues_t);
938 queue_select = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
939 clib_memset (queue_select, 0, len);
941 queue_chunk = queue_select->chunks.chunks;
942 queue_select->chunks.num_chunks = 1;
943 queue_select->vport_id = vport->vport_id;
945 queue_chunk->type = type;
946 queue_chunk->start_queue_id = qid;
947 queue_chunk->num_queues = 1;
949 args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES : VIRTCHNL2_OP_DISABLE_QUEUES;
950 args.in_args = (u8 *) queue_select;
951 args.in_args_size = len;
952 args.out_buffer = id->mbx_resp;
953 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
954 error = idpf_execute_vc_cmd (vm, id, &args);
956 return clib_error_return (
957 0, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
958 on ? "ENABLE" : "DISABLE");
960 clib_mem_free (queue_select);
965 idpf_op_enable_queues (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
966 u16 qid, bool rx, bool on)
973 type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
975 if (type == VIRTCHNL2_QUEUE_TYPE_RX)
977 queue_index = vport->chunks_info.rx_start_qid + qid;
978 error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
982 queue_index = vport->chunks_info.tx_start_qid + qid;
983 error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
988 /* switch tx completion queue */
989 if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
991 type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
992 queue_index = vport->chunks_info.tx_compl_start_qid + qid;
993 error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
998 /* switch rx buffer queue */
999 if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
1001 type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
1002 queue_index = vport->chunks_info.rx_buf_start_qid + 2 * qid;
1003 error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
1007 error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
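/*
 * Per-vport data path setup: create, configure and enable every RX and TX
 * queue, then allocate interrupt vectors and program the RX queue
 * interrupt mapping.
 */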
1016 idpf_queue_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
1017 idpf_create_if_args_t *args)
1019 clib_error_t *error = 0;
1022 for (i = 0; i < id->n_rx_queues; i++)
1024 if ((error = idpf_rx_queue_setup (vm, id, vport, i, args->rxq_size)))
1026 if ((error = idpf_vc_config_rxq (vm, id, vport, i)))
1028 if ((error = idpf_op_enable_queues (vm, id, vport, i, true, true)))
1032 for (i = 0; i < id->n_tx_queues; i++)
1034 if ((error = idpf_tx_queue_setup (vm, id, vport, i, args->txq_size)))
1036 if ((error = idpf_vc_config_txq (vm, id, vport, i)))
1038 if ((error = idpf_op_enable_queues (vm, id, vport, i, false, true)))
1042 if ((error = idpf_alloc_vectors (vm, id, vport, IDPF_DFLT_Q_VEC_NUM)))
1045 if ((error = idpf_config_rx_queues_irqs (vm, id, vport)))
1052 idpf_op_version (vlib_main_t *vm, idpf_device_t *id)
1054 clib_error_t *error = 0;
1055 idpf_cmd_info_t args;
1056 virtchnl2_version_info_t myver = {
1057 .major = VIRTCHNL2_VERSION_MAJOR_2,
1058 .minor = VIRTCHNL2_VERSION_MINOR_0,
1060 virtchnl2_version_info_t ver = { 0 };
1062 idpf_log_debug (id, "version: major %u minor %u", myver.major, myver.minor);
1064 args.ops = VIRTCHNL2_OP_VERSION;
1065 args.in_args = (u8 *) &myver;
1066 args.in_args_size = sizeof (myver);
1067 args.out_buffer = id->mbx_resp;
1068 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1069 error = idpf_execute_vc_cmd (vm, id, &args);
1071 return clib_error_return (0,
1072 "Failed to execute command VIRTCHNL_OP_VERSION");
1074 clib_memcpy (&ver, args.out_buffer, sizeof (ver));
1076 if (ver.major != VIRTCHNL2_VERSION_MAJOR_2 ||
1077 ver.minor != VIRTCHNL2_VERSION_MINOR_0)
1078 return clib_error_return (0,
1079 "incompatible virtchnl version "
1081 ver.major, ver.minor);
1087 idpf_op_get_caps (vlib_main_t *vm, idpf_device_t *id,
1088 virtchnl2_get_capabilities_t *caps)
1090 virtchnl2_get_capabilities_t caps_msg = { 0 };
1091 idpf_cmd_info_t args;
1092 clib_error_t *error = 0;
1094 caps_msg.csum_caps =
1095 VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
1096 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
1097 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
1098 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | VIRTCHNL2_CAP_TX_CSUM_GENERIC |
1099 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
1100 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
1101 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
1102 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | VIRTCHNL2_CAP_RX_CSUM_GENERIC;
1104 caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;
1106 args.ops = VIRTCHNL2_OP_GET_CAPS;
1107 args.in_args = (u8 *) &caps_msg;
1108 args.in_args_size = sizeof (caps_msg);
1109 args.out_buffer = id->mbx_resp;
1110 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1111 error = idpf_execute_vc_cmd (vm, id, &args);
1113 return clib_error_return (
1114 0, "Failed to execute command VIRTCHNL2_OP_GET_CAPS");
1116 clib_memcpy (caps, args.out_buffer, sizeof (*caps));
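/*
 * Create the mailbox control queue pair (ASQ/ARQ) from the register layout
 * below and locate the resulting queues in the device control queue list.
 */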
1122 idpf_mbx_init (vlib_main_t *vm, idpf_device_t *id)
1124 idpf_ctlq_create_info_t ctlq_info[CTLQ_NUM] = {
1126 .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
1128 .len = IDPF_CTLQ_LEN,
1129 .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
1133 .len = PF_FW_ATQLEN,
1134 .bah = PF_FW_ATQBAH,
1135 .bal = PF_FW_ATQBAL,
1136 .len_mask = PF_FW_ATQLEN_ATQLEN_M,
1137 .len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
1138 .head_mask = PF_FW_ATQH_ATQH_M,
1142 .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
1144 .len = IDPF_CTLQ_LEN,
1145 .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
1149 .len = PF_FW_ARQLEN,
1150 .bah = PF_FW_ARQBAH,
1151 .bal = PF_FW_ARQBAL,
1152 .len_mask = PF_FW_ARQLEN_ARQLEN_M,
1153 .len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
1154 .head_mask = PF_FW_ARQH_ARQH_M,
1158 struct idpf_ctlq_info *ctlq;
1160 if (idpf_ctlq_init (vm, id, CTLQ_NUM, ctlq_info))
1161 return clib_error_return (0, "ctlq init failed");
1163 LIST_FOR_EACH_ENTRY_SAFE (ctlq, NULL, &id->cq_list_head,
1164 struct idpf_ctlq_info, cq_list)
1166 if (ctlq->q_id == IDPF_CTLQ_ID &&
1167 ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
1169 if (ctlq->q_id == IDPF_CTLQ_ID &&
1170 ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
1174 if (!id->asq || !id->arq)
1176 idpf_ctlq_deinit (id);
1177 return clib_error_return (0, "failed to find mailbox control queues");
1184 idpf_vc_query_ptype_info (vlib_main_t *vm, idpf_device_t *id)
1186 virtchnl2_get_ptype_info_t ptype_info;
1187 idpf_cmd_info_t args;
1188 clib_error_t *error;
1190 ptype_info.start_ptype_id = 0;
1191 ptype_info.num_ptypes = IDPF_MAX_PKT_TYPE;
1192 args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
1193 args.in_args = (u8 *) &ptype_info;
1194 args.in_args_size = sizeof (virtchnl2_get_ptype_info_t);
1195 args.out_buffer = NULL;
1198 error = idpf_execute_vc_cmd (vm, id, &args);
1200 return clib_error_return (
1201 0, "Failed to execute command VIRTCHNL2_OP_GET_PTYPE_INFO");
1207 idpf_get_pkt_type (vlib_main_t *vm, idpf_device_t *id)
1209 virtchnl2_get_ptype_info_t *ptype_info;
1210 u16 ptype_recvd = 0, ptype_offset, i, j;
1211 clib_error_t *error;
1213 error = idpf_vc_query_ptype_info (vm, id);
1215 return clib_error_return (0, "Failed to query packet type information");
1218 clib_mem_alloc_aligned (IDPF_DFLT_MBX_BUF_SIZE, CLIB_CACHE_LINE_BYTES);
1220 while (ptype_recvd < IDPF_MAX_PKT_TYPE)
1222 error = idpf_read_one_msg (vm, id, VIRTCHNL2_OP_GET_PTYPE_INFO,
1223 (u8 *) ptype_info, IDPF_DFLT_MBX_BUF_SIZE);
1226 error = clib_error_return (0, "Failed to get packet type information");
1227 goto free_ptype_info;
1230 ptype_recvd += ptype_info->num_ptypes;
1232 sizeof (virtchnl2_get_ptype_info_t) - sizeof (virtchnl2_ptype_t);
1234 for (i = 0; i < ptype_info->num_ptypes; i++)
1236 bool is_inner = false, is_ip = false;
1237 virtchnl2_ptype_t *ptype;
1240 ptype = (virtchnl2_ptype_t *) ((u8 *) ptype_info + ptype_offset);
1241 ptype_offset += IDPF_GET_PTYPE_SIZE (ptype);
1242 if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE)
1245 clib_error_return (0, "Ptype offset exceeds mbx buffer size");
1246 goto free_ptype_info;
1249 if (ptype->ptype_id_10 == 0xFFFF)
1250 goto free_ptype_info;
1252 for (j = 0; j < ptype->proto_id_count; j++)
1254 switch (ptype->proto_id[j])
1256 case VIRTCHNL2_PROTO_HDR_GRE:
1257 case VIRTCHNL2_PROTO_HDR_VXLAN:
1258 proto_hdr &= ~IDPF_PTYPE_L4_MASK;
1259 proto_hdr |= IDPF_PTYPE_TUNNEL_GRENAT;
1262 case VIRTCHNL2_PROTO_HDR_MAC:
1265 proto_hdr &= ~IDPF_PTYPE_INNER_L2_MASK;
1266 proto_hdr |= IDPF_PTYPE_INNER_L2_ETHER;
1270 proto_hdr &= ~IDPF_PTYPE_L2_MASK;
1271 proto_hdr |= IDPF_PTYPE_L2_ETHER;
1274 case VIRTCHNL2_PROTO_HDR_VLAN:
1277 proto_hdr &= ~IDPF_PTYPE_INNER_L2_MASK;
1278 proto_hdr |= IDPF_PTYPE_INNER_L2_ETHER_VLAN;
1281 case VIRTCHNL2_PROTO_HDR_PTP:
1282 proto_hdr &= ~IDPF_PTYPE_L2_MASK;
1283 proto_hdr |= IDPF_PTYPE_L2_ETHER_TIMESYNC;
1285 case VIRTCHNL2_PROTO_HDR_LLDP:
1286 proto_hdr &= ~IDPF_PTYPE_L2_MASK;
1287 proto_hdr |= IDPF_PTYPE_L2_ETHER_LLDP;
1289 case VIRTCHNL2_PROTO_HDR_ARP:
1290 proto_hdr &= ~IDPF_PTYPE_L2_MASK;
1291 proto_hdr |= IDPF_PTYPE_L2_ETHER_ARP;
1293 case VIRTCHNL2_PROTO_HDR_PPPOE:
1294 proto_hdr &= ~IDPF_PTYPE_L2_MASK;
1295 proto_hdr |= IDPF_PTYPE_L2_ETHER_PPPOE;
1297 case VIRTCHNL2_PROTO_HDR_IPV4:
1300 proto_hdr |= IDPF_PTYPE_L3_IPV4_EXT_UNKNOWN;
1305 proto_hdr |= IDPF_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1306 IDPF_PTYPE_TUNNEL_IP;
1310 case VIRTCHNL2_PROTO_HDR_IPV6:
1313 proto_hdr |= IDPF_PTYPE_L3_IPV6_EXT_UNKNOWN;
1318 proto_hdr |= IDPF_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1319 IDPF_PTYPE_TUNNEL_IP;
1323 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
1324 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
1326 proto_hdr |= IDPF_PTYPE_INNER_L4_FRAG;
1328 proto_hdr |= IDPF_PTYPE_L4_FRAG;
1330 case VIRTCHNL2_PROTO_HDR_UDP:
1332 proto_hdr |= IDPF_PTYPE_INNER_L4_UDP;
1334 proto_hdr |= IDPF_PTYPE_L4_UDP;
1336 case VIRTCHNL2_PROTO_HDR_TCP:
1338 proto_hdr |= IDPF_PTYPE_INNER_L4_TCP;
1340 proto_hdr |= IDPF_PTYPE_L4_TCP;
1342 case VIRTCHNL2_PROTO_HDR_SCTP:
1344 proto_hdr |= IDPF_PTYPE_INNER_L4_SCTP;
1346 proto_hdr |= IDPF_PTYPE_L4_SCTP;
1348 case VIRTCHNL2_PROTO_HDR_ICMP:
1350 proto_hdr |= IDPF_PTYPE_INNER_L4_ICMP;
1352 proto_hdr |= IDPF_PTYPE_L4_ICMP;
1354 case VIRTCHNL2_PROTO_HDR_ICMPV6:
1356 proto_hdr |= IDPF_PTYPE_INNER_L4_ICMP;
1358 proto_hdr |= IDPF_PTYPE_L4_ICMP;
1360 case VIRTCHNL2_PROTO_HDR_L2TPV2:
1361 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
1362 case VIRTCHNL2_PROTO_HDR_L2TPV3:
1364 proto_hdr |= IDPF_PTYPE_TUNNEL_L2TP;
1366 case VIRTCHNL2_PROTO_HDR_NVGRE:
1368 proto_hdr |= IDPF_PTYPE_TUNNEL_NVGRE;
1370 case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
1372 proto_hdr |= IDPF_PTYPE_TUNNEL_GTPC;
1374 case VIRTCHNL2_PROTO_HDR_GTPU:
1375 case VIRTCHNL2_PROTO_HDR_GTPU_UL:
1376 case VIRTCHNL2_PROTO_HDR_GTPU_DL:
1378 proto_hdr |= IDPF_PTYPE_TUNNEL_GTPU;
1380 case VIRTCHNL2_PROTO_HDR_PAY:
1381 case VIRTCHNL2_PROTO_HDR_IPV6_EH:
1382 case VIRTCHNL2_PROTO_HDR_PRE_MAC:
1383 case VIRTCHNL2_PROTO_HDR_POST_MAC:
1384 case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
1385 case VIRTCHNL2_PROTO_HDR_SVLAN:
1386 case VIRTCHNL2_PROTO_HDR_CVLAN:
1387 case VIRTCHNL2_PROTO_HDR_MPLS:
1388 case VIRTCHNL2_PROTO_HDR_MMPLS:
1389 case VIRTCHNL2_PROTO_HDR_CTRL:
1390 case VIRTCHNL2_PROTO_HDR_ECP:
1391 case VIRTCHNL2_PROTO_HDR_EAPOL:
1392 case VIRTCHNL2_PROTO_HDR_PPPOD:
1393 case VIRTCHNL2_PROTO_HDR_IGMP:
1394 case VIRTCHNL2_PROTO_HDR_AH:
1395 case VIRTCHNL2_PROTO_HDR_ESP:
1396 case VIRTCHNL2_PROTO_HDR_IKE:
1397 case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
1398 case VIRTCHNL2_PROTO_HDR_GTP:
1399 case VIRTCHNL2_PROTO_HDR_GTP_EH:
1400 case VIRTCHNL2_PROTO_HDR_GTPCV2:
1401 case VIRTCHNL2_PROTO_HDR_ECPRI:
1402 case VIRTCHNL2_PROTO_HDR_VRRP:
1403 case VIRTCHNL2_PROTO_HDR_OSPF:
1404 case VIRTCHNL2_PROTO_HDR_TUN:
1405 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
1406 case VIRTCHNL2_PROTO_HDR_GENEVE:
1407 case VIRTCHNL2_PROTO_HDR_NSH:
1408 case VIRTCHNL2_PROTO_HDR_QUIC:
1409 case VIRTCHNL2_PROTO_HDR_PFCP:
1410 case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
1411 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
1412 case VIRTCHNL2_PROTO_HDR_RTP:
1413 case VIRTCHNL2_PROTO_HDR_NO_PROTO:
1417 id->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
1423 clib_mem_free (ptype_info);
1429 idpf_reset_pf (idpf_device_t *id)
1433 reg = idpf_reg_read (id, PFGEN_CTRL);
1434 idpf_reg_write (id, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
1437 #define IDPF_RESET_WAIT_CNT 100
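/*
 * Poll PFGEN_RSTAT roughly once per second until the PF reset triggered by
 * idpf_reset_pf() completes, failing after IDPF_RESET_WAIT_CNT attempts.
 */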
1439 idpf_check_pf_reset_done (vlib_main_t *vm, idpf_device_t *id)
1444 for (i = 0; i < IDPF_RESET_WAIT_CNT; i++)
1446 reg = idpf_reg_read (id, PFGEN_RSTAT);
1447 if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
1449 vlib_process_suspend (vm, 1.0);
1452 return clib_error_return (0, "pf reset timed out");
1456 idpf_init_vport_req_info (idpf_device_t *id,
1457 virtchnl2_create_vport_t *vport_info)
1459 vport_info->vport_type = VIRTCHNL2_VPORT_TYPE_DEFAULT;
1460 if (id->txq_model == 1)
1462 vport_info->txq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1463 vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
1464 vport_info->num_tx_complq =
1465 IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
1469 vport_info->txq_model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
1470 vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
1471 vport_info->num_tx_complq = 0;
1473 if (id->rxq_model == 1)
1475 vport_info->rxq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1476 vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
1477 vport_info->num_rx_bufq = IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
1481 vport_info->rxq_model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
1482 vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
1483 vport_info->num_rx_bufq = 0;
1490 idpf_vc_create_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
1491 virtchnl2_create_vport_t *vport_req_info)
1493 virtchnl2_create_vport_t vport_msg = { 0 };
1494 idpf_cmd_info_t args;
1495 clib_error_t *error;
1497 vport_msg.vport_type = vport_req_info->vport_type;
1498 vport_msg.txq_model = vport_req_info->txq_model;
1499 vport_msg.rxq_model = vport_req_info->rxq_model;
1500 vport_msg.num_tx_q = vport_req_info->num_tx_q;
1501 vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
1502 vport_msg.num_rx_q = vport_req_info->num_rx_q;
1503 vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
1505 clib_memset (&args, 0, sizeof (args));
1506 args.ops = VIRTCHNL2_OP_CREATE_VPORT;
1507 args.in_args = (u8 *) &vport_msg;
1508 args.in_args_size = sizeof (vport_msg);
1509 args.out_buffer = id->mbx_resp;
1510 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1511 error = idpf_execute_vc_cmd (vm, id, &args);
1513 return clib_error_return (
1514 0, "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
1516 clib_memcpy (vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
1521 idpf_vc_destroy_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport)
1523 virtchnl2_vport_t vc_vport;
1524 idpf_cmd_info_t args;
1525 clib_error_t *error = 0;
1527 vc_vport.vport_id = vport->vport_id;
1529 clib_memset (&args, 0, sizeof (args));
1530 args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
1531 args.in_args = (u8 *) &vc_vport;
1532 args.in_args_size = sizeof (vc_vport);
1533 args.out_buffer = id->mbx_resp;
1534 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1535 error = idpf_execute_vc_cmd (vm, id, &args);
1537 return clib_error_return (
1538 0, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
1544 idpf_init_vport (idpf_device_t *id, idpf_vport_t *vport)
1546 virtchnl2_create_vport_t *vport_info = vport->vport_info;
1549 vport->vport_id = vport_info->vport_id;
1550 vport->txq_model = vport_info->txq_model;
1551 vport->rxq_model = vport_info->rxq_model;
1552 vport->num_tx_q = vport_info->num_tx_q;
1553 vport->num_tx_complq = vport_info->num_tx_complq;
1554 vport->num_rx_q = vport_info->num_rx_q;
1555 vport->num_rx_bufq = vport_info->num_rx_bufq;
1556 vport->max_mtu = vport_info->max_mtu;
1557 clib_memcpy (vport->default_mac_addr, vport_info->default_mac_addr,
1560 for (i = 0; i < vport_info->chunks.num_chunks; i++)
1562 type = vport_info->chunks.chunks[i].type;
1565 case VIRTCHNL2_QUEUE_TYPE_TX:
1566 vport->chunks_info.tx_start_qid =
1567 vport_info->chunks.chunks[i].start_queue_id;
1568 vport->chunks_info.tx_qtail_start =
1569 vport_info->chunks.chunks[i].qtail_reg_start;
1570 vport->chunks_info.tx_qtail_spacing =
1571 vport_info->chunks.chunks[i].qtail_reg_spacing;
1573 case VIRTCHNL2_QUEUE_TYPE_RX:
1574 vport->chunks_info.rx_start_qid =
1575 vport_info->chunks.chunks[i].start_queue_id;
1576 vport->chunks_info.rx_qtail_start =
1577 vport_info->chunks.chunks[i].qtail_reg_start;
1578 vport->chunks_info.rx_qtail_spacing =
1579 vport_info->chunks.chunks[i].qtail_reg_spacing;
1581 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
1582 vport->chunks_info.tx_compl_start_qid =
1583 vport_info->chunks.chunks[i].start_queue_id;
1584 vport->chunks_info.tx_compl_qtail_start =
1585 vport_info->chunks.chunks[i].qtail_reg_start;
1586 vport->chunks_info.tx_compl_qtail_spacing =
1587 vport_info->chunks.chunks[i].qtail_reg_spacing;
1589 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1590 vport->chunks_info.rx_buf_start_qid =
1591 vport_info->chunks.chunks[i].start_queue_id;
1592 vport->chunks_info.rx_buf_qtail_start =
1593 vport_info->chunks.chunks[i].qtail_reg_start;
1594 vport->chunks_info.rx_buf_qtail_spacing =
1595 vport_info->chunks.chunks[i].qtail_reg_spacing;
1598 return clib_error_return (0, "Unsupported queue type");
1606 idpf_ena_dis_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
1609 virtchnl2_vport_t vc_vport;
1610 idpf_cmd_info_t args;
1611 clib_error_t *error;
1613 vc_vport.vport_id = vport->vport_id;
1614 args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT : VIRTCHNL2_OP_DISABLE_VPORT;
1615 args.in_args = (u8 *) &vc_vport;
1616 args.in_args_size = sizeof (vc_vport);
1617 args.out_buffer = id->mbx_resp;
1618 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1620 error = idpf_execute_vc_cmd (vm, id, &args);
1623 return clib_error_return (
1624 0, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
1625 enable ? "ENABLE" : "DISABLE");
1632 idpf_dealloc_vectors (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport)
1634 virtchnl2_alloc_vectors_t *alloc_vec;
1635 virtchnl2_vector_chunks_t *vcs;
1636 idpf_cmd_info_t args;
1637 clib_error_t *error;
1640 alloc_vec = vport->recv_vectors;
1641 vcs = &alloc_vec->vchunks;
1643 len = sizeof (virtchnl2_vector_chunks_t) +
1644 (vcs->num_vchunks - 1) * sizeof (virtchnl2_vector_chunk_t);
1646 args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
1647 args.in_args = (u8 *) vcs;
1648 args.in_args_size = len;
1649 args.out_buffer = id->mbx_resp;
1650 args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1651 error = idpf_execute_vc_cmd (vm, id, &args);
1653 return clib_error_return (
1654 0, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
1660 idpf_dev_vport_init (vlib_main_t *vm, idpf_device_t *id,
1661 idpf_vport_param_t *param)
1663 idpf_vport_t *vport;
1664 virtchnl2_create_vport_t vport_req_info = { 0 };
1665 clib_error_t *error = 0;
1667 vport = clib_mem_alloc (sizeof (idpf_vport_t));
1668 clib_memset (vport, 0, sizeof (idpf_vport_t));
1670 vport->vport_info = clib_mem_alloc (IDPF_DFLT_MBX_BUF_SIZE);
1671 clib_memset (vport->vport_info, 0, IDPF_DFLT_MBX_BUF_SIZE);
1673 id->vports[param->idx] = vport;
1675 vport->idx = param->idx;
1677 idpf_init_vport_req_info (id, &vport_req_info);
1679 error = idpf_vc_create_vport (vm, id, vport, &vport_req_info);
1682 idpf_log_err (id, "Failed to create vport.");
1683 goto err_create_vport;
1686 error = idpf_init_vport (id, vport);
1689 idpf_log_err (id, "Failed to init vport.");
1690 goto err_init_vport;
1693 id->vports[param->idx] = vport;
1695 clib_memcpy (id->hwaddr, vport->default_mac_addr, IDPF_ETH_ALEN);
1700 id->vports[param->idx] = NULL; /* reset */
1701 idpf_vc_destroy_vport (vm, id, vport);
1703 clib_mem_free (vport->vport_info);
1704 clib_mem_free (vport);
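/*
 * Bring up the device: wait for PF reset, initialize the mailbox, agree on
 * a virtchnl2 version, fetch the packet type table and capabilities, then
 * create, enable and configure the requested vports and their queues.
 */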
1710 idpf_device_init (vlib_main_t *vm, idpf_main_t *im, idpf_device_t *id,
1711 idpf_create_if_args_t *args)
1713 idpf_vport_t *vport;
1714 idpf_vport_param_t vport_param = { 0 };
1715 virtchnl2_get_capabilities_t caps = { 0 };
1716 clib_error_t *error;
1717 u16 rxq_num, txq_num;
1721 error = idpf_check_pf_reset_done (vm, id);
1726 * Init mailbox configuration
1728 if ((error = idpf_mbx_init (vm, id)))
1734 error = idpf_op_version (vm, id);
1739 * Get pkt type table
1741 error = idpf_get_pkt_type (vm, id);
1745 /* Get idpf capability */
1746 error = idpf_op_get_caps (vm, id, &caps);
1750 rxq_num = args->rxq_num ? args->rxq_num : 1;
1751 txq_num = args->txq_num ? args->txq_num : vlib_get_n_threads ();
1753 /* Sync capabilities */
1754 id->n_rx_queues = rxq_num;
1755 id->n_tx_queues = txq_num;
1756 id->csum_caps = caps.csum_caps;
1757 id->seg_caps = caps.seg_caps;
1758 id->hsplit_caps = caps.hsplit_caps;
1759 id->rsc_caps = caps.rsc_caps;
1760 id->rss_caps = caps.rss_caps;
1761 id->other_caps = caps.other_caps;
1762 id->max_rx_q = caps.max_rx_q;
1763 id->max_tx_q = caps.max_tx_q;
1764 id->max_rx_bufq = caps.max_rx_bufq;
1765 id->max_tx_complq = caps.max_tx_complq;
1766 id->max_sriov_vfs = caps.max_sriov_vfs;
1767 id->max_vports = caps.max_vports;
1768 id->default_num_vports = caps.default_num_vports;
1770 id->vports = clib_mem_alloc (id->max_vports * sizeof (*id->vports));
1771 id->max_rxq_per_msg =
1772 (IDPF_DFLT_MBX_BUF_SIZE - sizeof (virtchnl2_config_rx_queues_t)) /
1773 sizeof (virtchnl2_rxq_info_t);
1774 id->max_txq_per_msg =
1775 (IDPF_DFLT_MBX_BUF_SIZE - sizeof (virtchnl2_config_tx_queues_t)) /
1776 sizeof (virtchnl2_txq_info_t);
1778 id->cur_vport_idx = 0;
1780 id->cur_vport_nb = 0;
1782 if (!args->rxq_single)
1784 if (!args->txq_single)
1787 /* Init and enable vports */
1788 if (args->req_vport_nb == 1)
1790 vport_param.id = id;
1791 vport_param.idx = 0;
1792 error = idpf_dev_vport_init (vm, id, &vport_param);
1795 vport = id->vports[vport_param.idx];
1796 error = idpf_ena_dis_vport (vm, id, vport, true);
1799 id->cur_vports |= 1ULL << vport_param.idx;
1801 id->cur_vport_idx++;
1802 error = idpf_queue_init (vm, id, vport, args);
1808 for (i = 0; i < args->req_vport_nb; i++)
1810 vport_param.id = id;
1811 vport_param.idx = i;
1812 if ((error = idpf_dev_vport_init (vm, id, &vport_param)))
1814 vport = id->vports[vport_param.idx];
1815 error = idpf_ena_dis_vport (vm, id, vport, true);
1818 id->cur_vports |= 1ULL << vport_param.idx;
1820 id->cur_vport_idx++;
1821 error = idpf_queue_init (vm, id, vport, args);
1827 id->flags |= IDPF_DEVICE_F_INITIALIZED;
1832 idpf_flag_change (vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
1834 idpf_device_t *id = idpf_get_device (hw->dev_instance);
1838 case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
1839 id->flags &= ~IDPF_DEVICE_F_PROMISC;
1841 case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
1842 id->flags |= IDPF_DEVICE_F_PROMISC;
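/*
 * Tear down an interface: remove it from VNET, release interrupt vectors,
 * close the PCI device and free all per-vport descriptor rings, buffers
 * and queue state before returning the device slot to the pool.
 */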
1852 idpf_delete_if (vlib_main_t *vm, idpf_device_t *id, int with_barrier)
1854 vnet_main_t *vnm = vnet_get_main ();
1855 idpf_main_t *im = &idpf_main;
1856 idpf_vport_t *vport;
1860 id->flags &= ~IDPF_DEVICE_F_ADMIN_UP;
1862 if (id->hw_if_index)
1865 vlib_worker_thread_barrier_sync (vm);
1866 vnet_hw_interface_set_flags (vnm, id->hw_if_index, 0);
1867 ethernet_delete_interface (vnm, id->hw_if_index);
1869 vlib_worker_thread_barrier_release (vm);
1872 for (i = 0; i < id->cur_vport_nb; i++)
1874 vport = id->vports[i];
1875 if (vport->recv_vectors != NULL)
1876 idpf_dealloc_vectors (vm, id, vport);
1879 vlib_pci_device_close (vm, id->pci_dev_handle);
1881 vlib_physmem_free (vm, id->asq);
1882 vlib_physmem_free (vm, id->arq);
1884 for (i = 0; i < id->cur_vport_nb; i++)
1886 vport = id->vports[i];
1887 vec_foreach_index (i, vport->rxqs)
1889 idpf_rxq_t *rxq = vec_elt_at_index (vport->rxqs, i);
1890 vlib_physmem_free (vm, (void *) rxq->descs);
1891 if (rxq->n_enqueued)
1892 vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1894 vec_free (rxq->bufs);
1897 vec_free (vport->rxqs);
1899 vec_foreach_index (i, vport->txqs)
1901 idpf_txq_t *txq = vec_elt_at_index (vport->txqs, i);
1902 vlib_physmem_free (vm, (void *) txq->descs);
1903 if (txq->n_enqueued)
1905 u16 first = (txq->next - txq->n_enqueued) & (txq->size - 1);
1906 vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1909 vec_free (txq->ph_bufs);
1910 vec_free (txq->bufs);
1911 clib_ring_free (txq->rs_slots);
1912 vec_free (txq->tmp_bufs);
1913 vec_free (txq->tmp_descs);
1914 clib_spinlock_free (&txq->lock);
1916 vec_free (vport->txqs);
1919 vec_free (id->name);
1921 clib_error_free (id->error);
1922 dev_instance = id->dev_instance;
1923 clib_mem_free (id->mbx_resp);
1924 clib_memset (id, 0, sizeof (*id));
1925 pool_put_index (im->devices, dev_instance);
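/*
 * Apply the default RX/TX ring sizes and reject values that are out of
 * range or not a power of two.
 */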
1930 idpf_validate_queue_size (idpf_create_if_args_t *args)
1932 clib_error_t *error = 0;
1934 args->rxq_size = (args->rxq_size == 0) ? IDPF_RXQ_SZ : args->rxq_size;
1935 args->txq_size = (args->txq_size == 0) ? IDPF_TXQ_SZ : args->txq_size;
1937 if ((args->rxq_size > IDPF_QUEUE_SZ_MAX) ||
1938 (args->txq_size > IDPF_QUEUE_SZ_MAX))
1940 args->rv = VNET_API_ERROR_INVALID_VALUE;
1941 args->error = clib_error_return (
1942 error, "queue size must not be greater than %u", IDPF_QUEUE_SZ_MAX);
1945 if ((args->rxq_size < IDPF_QUEUE_SZ_MIN) ||
1946 (args->txq_size < IDPF_QUEUE_SZ_MIN))
1948 args->rv = VNET_API_ERROR_INVALID_VALUE;
1949 args->error = clib_error_return (
1950 error, "queue size must not be smaller than %u", IDPF_QUEUE_SZ_MIN);
1953 if ((args->rxq_size & (args->rxq_size - 1)) ||
1954 (args->txq_size & (args->txq_size - 1)))
1956 args->rv = VNET_API_ERROR_INVALID_VALUE;
1958 clib_error_return (error, "queue size must be a power of two");
1965 idpf_process_one_device (vlib_main_t *vm, idpf_device_t *id, int is_irq)
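/*
 * Housekeeping process node: waits for events (interface start/delete,
 * admin queue interrupt) or a periodic timeout, then walks a local copy of
 * the device list and calls idpf_process_one_device () for each device.
 */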
1972 idpf_process (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
1974 idpf_main_t *im = &idpf_main;
1975 uword *event_data = 0, event_type;
1976 int enabled = 0, irq;
1977 f64 last_run_duration = 0;
1978 f64 last_periodic_time = 0;
1979 idpf_device_t **dev_pointers = 0;
1985 vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1987 vlib_process_wait_for_event (vm);
1989 event_type = vlib_process_get_events (vm, &event_data);
1995 last_periodic_time = vlib_time_now (vm);
1997 case IDPF_PROCESS_EVENT_START:
2000 case IDPF_PROCESS_EVENT_DELETE_IF:
2001 for (int i = 0; i < vec_len (event_data); i++)
2003 idpf_device_t *id = idpf_get_device (event_data[i]);
2004 idpf_delete_if (vm, id, /* with_barrier */ 1);
2006 if (pool_elts (im->devices) < 1)
2009 case IDPF_PROCESS_EVENT_AQ_INT:
2017 vec_reset_length (event_data);
2022 /* create local list of device pointers as device pool may grow
2024 vec_reset_length (dev_pointers);
2026 pool_foreach_index (i, im->devices)
2028 vec_add1 (dev_pointers, idpf_get_device (i));
2031 vec_foreach_index (i, dev_pointers)
2033 idpf_process_one_device (vm, dev_pointers[i], irq);
2036 last_run_duration = vlib_time_now (vm) - last_periodic_time;
2041 VLIB_REGISTER_NODE (idpf_process_node) = {
2042 .function = idpf_process,
2043 .type = VLIB_NODE_TYPE_PROCESS,
2044 .name = "idpf-process",
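/*
 * Create an IDPF interface: validate the arguments, open and map the PCI
 * device, run idpf_device_init (), register the ethernet interface and its
 * RX/TX queues with VNET, and signal the idpf-process node to start when
 * the first device is created.
 */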
2048 idpf_create_if (vlib_main_t *vm, idpf_create_if_args_t *args)
2050 vnet_main_t *vnm = vnet_get_main ();
2051 vnet_eth_interface_registration_t eir = {};
2052 idpf_main_t *im = &idpf_main;
2053 idpf_device_t *id, **idp;
2054 vlib_pci_dev_handle_t h;
2055 clib_error_t *error = 0;
2058 /* check input args */
2059 if (idpf_validate_queue_size (args) != 0)
2062 pool_foreach (idp, im->devices)
2064 if ((*idp)->pci_addr.as_u32 == args->addr.as_u32)
2066 args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
2068 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
2069 &args->addr, "pci address in use");
2074 pool_get (im->devices, idp);
2076 clib_mem_alloc_aligned (sizeof (idpf_device_t), CLIB_CACHE_LINE_BYTES);
2077 clib_memset (id, 0, sizeof (idpf_device_t));
2078 id->mbx_resp = clib_mem_alloc (IDPF_DFLT_MBX_BUF_SIZE);
2079 id->dev_instance = idp - im->devices;
2080 id->per_interface_next_index = ~0;
2081 id->name = vec_dup (args->name);
2084 vlib_pci_device_open (vm, &args->addr, idpf_pci_device_ids, &h)))
2086 pool_put (im->devices, idp);
2088 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
2089 args->error = clib_error_return (error, "pci-addr %U",
2090 format_vlib_pci_addr, &args->addr);
2093 id->pci_dev_handle = h;
2094 id->pci_addr = args->addr;
2095 id->numa_node = vlib_pci_get_numa_node (vm, h);
2097 vlib_pci_set_private_data (vm, h, id->dev_instance);
2099 if ((error = vlib_pci_bus_master_enable (vm, h)))
2102 if ((error = vlib_pci_map_region (vm, h, 0, &id->bar0)))
2105 if (vlib_pci_supports_virtual_addr_dma (vm, h))
2106 id->flags |= IDPF_DEVICE_F_VA_DMA;
2108 if ((error = idpf_device_init (vm, im, id, args)))
2111 /* create interface */
2112 eir.dev_class_index = idpf_device_class.index;
2113 eir.dev_instance = id->dev_instance;
2114 eir.address = id->hwaddr;
2115 eir.cb.flag_change = idpf_flag_change;
2116 id->hw_if_index = vnet_eth_register_interface (vnm, &eir);
2118 ethernet_set_flags (vnm, id->hw_if_index,
2119 ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
2121 vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, id->hw_if_index);
2122 args->sw_if_index = id->sw_if_index = sw->sw_if_index;
2124 vnet_hw_if_set_caps (vnm, id->hw_if_index,
2125 VNET_HW_IF_CAP_INT_MODE | VNET_HW_IF_CAP_MAC_FILTER |
2126 VNET_HW_IF_CAP_TX_CKSUM | VNET_HW_IF_CAP_TCP_GSO);
2128 for (v = 0; v < id->cur_vport_nb; v++)
2130 for (j = 0; j < id->n_rx_queues; j++)
2133 i = v * id->n_rx_queues + j;
2134 qi = vnet_hw_if_register_rx_queue (vnm, id->hw_if_index, i,
2135 VNET_HW_IF_RXQ_THREAD_ANY);
2136 id->vports[v]->rxqs[j].queue_index = qi;
2138 for (j = 0; j < id->n_tx_queues; j++)
2141 i = v * id->n_tx_queues + j;
2142 qi = vnet_hw_if_register_tx_queue (vnm, id->hw_if_index, i);
2143 id->vports[v]->txqs[j].queue_index = qi;
2147 for (v = 0; v < id->cur_vport_nb; v++)
2148 for (i = 0; i < vlib_get_n_threads (); i++)
2150 u32 qi = id->vports[v]->txqs[i % id->n_tx_queues].queue_index;
2151 vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
2154 vnet_hw_if_update_runtime_data (vnm, id->hw_if_index);
2156 if (pool_elts (im->devices) == 1)
2157 vlib_process_signal_event (vm, idpf_process_node.index,
2158 IDPF_PROCESS_EVENT_START, 0);
2163 idpf_delete_if (vm, id, /* with_barrier */ 0);
2164 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
2165 args->error = clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
2167 idpf_log_err (id, "error: %U", format_clib_error, args->error);
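/*
 * Allocate a cache-line aligned physmem block for control queue use,
 * DMA-map it and record both its virtual address and the DMA address the
 * device should use.
 */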
2171 idpf_alloc_dma_mem (vlib_main_t *vm, idpf_device_t *id, idpf_dma_mem_t *mem,
2175 vlib_pci_dev_handle_t h = id->pci_dev_handle;
2181 mz = vlib_physmem_alloc_aligned_on_numa (vm, size, CLIB_CACHE_LINE_BYTES,
2185 if (vlib_pci_map_dma (vm, h, mz))
2189 if (id->flags & IDPF_DEVICE_F_VA_DMA)
2192 clib_memset (mem->va, 0, size);
2198 mem->pa = idpf_dma_addr (vm, id, mz);
2204 idpf_free_dma_mem (idpf_device_t *id, idpf_dma_mem_t *mem)
2210 clib_mem_free (mem);
2213 static clib_error_t *
2214 idpf_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
2216 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
2217 idpf_device_t *id = idpf_get_device (hi->dev_instance);
2218 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
2220 if (id->flags & IDPF_DEVICE_F_ERROR)
2221 return clib_error_return (0, "device is in error state");
2225 vnet_hw_interface_set_flags (vnm, id->hw_if_index,
2226 VNET_HW_INTERFACE_FLAG_LINK_UP);
2227 id->flags |= IDPF_DEVICE_F_ADMIN_UP;
2231 vnet_hw_interface_set_flags (vnm, id->hw_if_index, 0);
2232 id->flags &= ~IDPF_DEVICE_F_ADMIN_UP;
2237 VNET_DEVICE_CLASS (idpf_device_class, ) = {
2238 .name = "Infrastructure Data Path Function (IDPF) interface",
2239 .format_device_name = format_idpf_device_name,
2240 .admin_up_down_function = idpf_interface_admin_up_down,
2244 idpf_init (vlib_main_t *vm)
2246 idpf_main_t *im = &idpf_main;
2247 vlib_thread_main_t *tm = vlib_get_thread_main ();
2249 vec_validate_aligned (im->per_thread_data, tm->n_vlib_mains - 1,
2250 CLIB_CACHE_LINE_BYTES);
2255 VLIB_INIT_FUNCTION (idpf_init) = {
2256 .runs_after = VLIB_INITS ("pci_bus_init"),
2260 * fd.io coding-style-patch-verification: ON
2263 * eval: (c-set-style "gnu")