/*------------------------------------------------------------------
 * Copyright (c) 2023 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------*/

#include <idpf/idpf.h>

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 */
static int
idpf_ctlq_alloc_desc_ring (vlib_main_t *vm, idpf_device_t *id,
			   struct idpf_ctlq_info *cq)
{
  size_t size = cq->ring_size * sizeof (idpf_ctlq_desc_t);

  /* Fixme: alloc dma va */
  cq->desc_ring.va = idpf_alloc_dma_mem (vm, id, &cq->desc_ring, size);
  if (!cq->desc_ring.va)
    return IDPF_ERR_NO_MEMORY;

  return IDPF_SUCCESS;
}

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int
idpf_ctlq_alloc_bufs (vlib_main_t *vm, idpf_device_t *id,
		      struct idpf_ctlq_info *cq)
{
  int i = 0;
  u16 len;

  /* Do not allocate DMA buffers for transmit queues */
  if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
    return IDPF_SUCCESS;

  /* We'll be allocating the buffer info memory first, then we can
   * allocate the mapped buffers for the event processing
   */
  len = cq->ring_size * sizeof (idpf_dma_mem_t *);
  cq->bi.rx_buff = (idpf_dma_mem_t **) clib_mem_alloc (len);
  if (!cq->bi.rx_buff)
    return IDPF_ERR_NO_MEMORY;
  clib_memset (cq->bi.rx_buff, 0, len);

  /* allocate the mapped buffers (except for the last one) */
  for (i = 0; i < cq->ring_size - 1; i++)
    {
      idpf_dma_mem_t *bi;
      int num = 1; /* number of idpf_dma_mem to be allocated */

      cq->bi.rx_buff[i] =
	(idpf_dma_mem_t *) clib_mem_alloc (num * sizeof (idpf_dma_mem_t));
      if (!cq->bi.rx_buff[i])
	goto unwind_alloc_cq_bufs;

      bi = cq->bi.rx_buff[i];

      bi->va = idpf_alloc_dma_mem (vm, id, bi, cq->buf_size);
      if (!bi->va)
	{
	  /* unwind will not free the failed entry */
	  clib_mem_free (cq->bi.rx_buff[i]);
	  goto unwind_alloc_cq_bufs;
	}
    }

  return IDPF_SUCCESS;

unwind_alloc_cq_bufs:
  /* don't try to free the one that failed... */
  i--;
  for (; i >= 0; i--)
    {
      idpf_free_dma_mem (id, cq->bi.rx_buff[i]);
      clib_mem_free (cq->bi.rx_buff[i]);
    }
  clib_mem_free (cq->bi.rx_buff);

  return IDPF_ERR_NO_MEMORY;
}
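
/*
 * Resulting layout (illustrative, assuming ring_size == 4): cq->bi.rx_buff
 * holds four idpf_dma_mem_t pointers; entries 0..2 each own a DMA buffer of
 * cq->buf_size bytes, while the last entry is deliberately left NULL (one
 * slot stays unposted) and is repopulated later by
 * idpf_ctlq_post_rx_buffs () as buffers are returned.
 */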

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and removed from the descriptor ring
 */
static void
idpf_ctlq_free_desc_ring (idpf_device_t *id, struct idpf_ctlq_info *cq)
{
  idpf_free_dma_mem (id, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and
 * TX queues. The upper layers are expected to manage freeing of TX DMA
 * buffers themselves.
 */
static void
idpf_ctlq_free_bufs (idpf_device_t *id, struct idpf_ctlq_info *cq)
{
  void *bi;

  if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
    {
      int i;

      /* free DMA buffers for rx queues */
      for (i = 0; i < cq->ring_size; i++)
	{
	  if (cq->bi.rx_buff[i])
	    {
	      idpf_free_dma_mem (id, cq->bi.rx_buff[i]);
	      clib_mem_free (cq->bi.rx_buff[i]);
	    }
	}

      bi = (void *) cq->bi.rx_buff;
    }
  else
    {
      bi = (void *) cq->bi.tx_msg;
    }

  /* free the buffer header */
  clib_mem_free (bi);
}

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void
idpf_ctlq_dealloc_ring_res (idpf_device_t *id, struct idpf_ctlq_info *cq)
{
  /* free ring buffers and the ring itself */
  idpf_ctlq_free_bufs (id, cq);
  idpf_ctlq_free_desc_ring (id, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int
idpf_ctlq_alloc_ring_res (vlib_main_t *vm, idpf_device_t *id,
			  struct idpf_ctlq_info *cq)
{
  int ret_code;

  /* verify input for valid configuration */
  if (!cq->ring_size || !cq->buf_size)
    return IDPF_ERR_CFG;

  /* allocate the ring memory */
  ret_code = idpf_ctlq_alloc_desc_ring (vm, id, cq);
  if (ret_code)
    return ret_code;

  /* allocate buffers in the rings */
  ret_code = idpf_ctlq_alloc_bufs (vm, id, cq);
  if (ret_code)
    goto idpf_init_cq_free_ring;

  /* success! */
  return IDPF_SUCCESS;

idpf_init_cq_free_ring:
  idpf_free_dma_mem (id, &cq->desc_ring);
  return ret_code;
}

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: structs containing info for each queue to be initialized
 */
static void
idpf_ctlq_setup_regs (struct idpf_ctlq_info *cq,
		      idpf_ctlq_create_info_t *q_create_info)
{
  /* set control queue registers in our local struct */
  cq->reg.head = q_create_info->reg.head;
  cq->reg.tail = q_create_info->reg.tail;
  cq->reg.len = q_create_info->reg.len;
  cq->reg.bah = q_create_info->reg.bah;
  cq->reg.bal = q_create_info->reg.bal;
  cq->reg.len_mask = q_create_info->reg.len_mask;
  cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
  cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized
 * the descriptor ring memory and buffer memory
 */
static void
idpf_ctlq_init_regs (vlib_main_t *vm, idpf_device_t *id,
		     struct idpf_ctlq_info *cq, bool is_rxq)
{
  /* Update tail to post pre-allocated buffers for rx queues */
  if (is_rxq)
    idpf_reg_write (id, cq->reg.tail, (u32) (cq->ring_size - 1));

  /* For non-Mailbox control queues only TAIL need to be set */
  if (cq->q_id != -1)
    return;

  /* Clear Head for both send or receive */
  idpf_reg_write (id, cq->reg.head, 0);

  /* set starting point */
  idpf_reg_write (id, cq->reg.bal, IDPF_LO_DWORD (cq->desc_ring.pa));
  idpf_reg_write (id, cq->reg.bah, IDPF_HI_DWORD (cq->desc_ring.pa));
  idpf_reg_write (id, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
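
/*
 * Worked example of the base-address split above (values illustrative):
 * a descriptor ring at physical address 0x123456000 is programmed as
 * BAL = IDPF_LO_DWORD (0x123456000) = 0x23456000 and
 * BAH = IDPF_HI_DWORD (0x123456000) = 0x00000001, while LEN carries the
 * ring size in its low bits with len_ena_mask OR'd in to enable the queue.
 */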

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void
idpf_ctlq_init_rxq_bufs (struct idpf_ctlq_info *cq)
{
  int i = 0;

  for (i = 0; i < cq->ring_size; i++)
    {
      idpf_ctlq_desc_t *desc = IDPF_CTLQ_DESC (cq, i);
      idpf_dma_mem_t *bi = cq->bi.rx_buff[i];

      /* No buffer to post to descriptor, continue */
      if (!bi)
	continue;

      desc->flags = IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD;
      desc->opcode = 0;
      desc->datalen = (u16) bi->size;
      desc->ret_val = 0;
      desc->cookie_high = 0;
      desc->cookie_low = 0;
      desc->params.indirect.addr_high = IDPF_HI_DWORD (bi->pa);
      desc->params.indirect.addr_low = IDPF_LO_DWORD (bi->pa);
      desc->params.indirect.param0 = 0;
      desc->params.indirect.param1 = 0;
    }
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @id: pointer to device struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void
idpf_ctlq_shutdown (idpf_device_t *id, struct idpf_ctlq_info *cq)
{
  clib_spinlock_lock (&cq->cq_lock);

  if (!cq->ring_size)
    goto shutdown_sq_out;

  /* free ring buffers and the ring itself */
  idpf_ctlq_dealloc_ring_res (id, cq);

  /* Set ring_size to 0 to indicate uninitialized queue */
  cq->ring_size = 0;

shutdown_sq_out:
  clib_spinlock_unlock (&cq->cq_lock);
  clib_spinlock_free (&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue
 * list. The cq parameter will be allocated/initialized and passed back to the
 * caller if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int
idpf_ctlq_add (vlib_main_t *vm, idpf_device_t *id,
	       idpf_ctlq_create_info_t *qinfo, struct idpf_ctlq_info **cq_out)
{
  bool is_rxq = false;
  int status = IDPF_SUCCESS;

  if (!qinfo->len || !qinfo->buf_size ||
      qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
      qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
    return IDPF_ERR_CFG;

  /* Fixme: memory allocation */
  *cq_out = vlib_physmem_alloc_aligned_on_numa (
    vm, sizeof (struct idpf_ctlq_info), CLIB_CACHE_LINE_BYTES, id->numa_node);
  if (!(*cq_out))
    return IDPF_ERR_NO_MEMORY;

  if ((vlib_pci_map_dma (vm, id->pci_dev_handle, *cq_out)))
    {
      status = IDPF_ERR_NO_MEMORY;
      goto init_free_q;
    }

  (*cq_out)->cq_type = qinfo->type;
  (*cq_out)->q_id = qinfo->id;
  (*cq_out)->buf_size = qinfo->buf_size;
  (*cq_out)->ring_size = qinfo->len;

  (*cq_out)->next_to_use = 0;
  (*cq_out)->next_to_clean = 0;
  (*cq_out)->next_to_post = (*cq_out)->ring_size - 1;

  switch (qinfo->type)
    {
    case IDPF_CTLQ_TYPE_MAILBOX_RX:
      is_rxq = true;
      /* fallthrough */
    case IDPF_CTLQ_TYPE_MAILBOX_TX:
      status = idpf_ctlq_alloc_ring_res (vm, id, *cq_out);
      break;
    default:
      status = IDPF_ERR_PARAM;
      break;
    }

  if (status)
    goto init_free_q;

  if (is_rxq)
    {
      idpf_ctlq_init_rxq_bufs (*cq_out);
    }
  else
    {
      /* Allocate the array of msg pointers for TX queues */
      (*cq_out)->bi.tx_msg = (idpf_ctlq_msg_t **) clib_mem_alloc (
	qinfo->len * sizeof (idpf_ctlq_msg_t *));
      if (!(*cq_out)->bi.tx_msg)
	{
	  status = IDPF_ERR_NO_MEMORY;
	  goto init_dealloc_q_mem;
	}
    }

  idpf_ctlq_setup_regs (*cq_out, qinfo);

  idpf_ctlq_init_regs (vm, id, *cq_out, is_rxq);

  /* Fixme: lock issue */
  clib_spinlock_init (&(*cq_out)->cq_lock);

  LIST_INSERT_HEAD (&id->cq_list_head, (*cq_out), cq_list);

  return status;

init_dealloc_q_mem:
  /* free ring buffers and the ring itself */
  idpf_ctlq_dealloc_ring_res (id, *cq_out);
init_free_q:
  vlib_physmem_free (vm, *cq_out);

  return status;
}
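
/*
 * Usage sketch for idpf_ctlq_add (hypothetical caller; the len and buf_size
 * values are assumed mailbox defaults, not derived from this file; id == -1
 * marks the default mailbox, matching the q_id check in
 * idpf_ctlq_init_regs):
 *
 *   idpf_ctlq_create_info_t qinfo = {
 *     .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
 *     .id = -1,
 *     .len = 64,		// ring size, in descriptors
 *     .buf_size = 4096,	// per-buffer DMA size in bytes
 *   };
 *   struct idpf_ctlq_info *cq;
 *
 *   if (idpf_ctlq_add (vm, id, &qinfo, &cq))
 *     ;			// handle allocation failure
 */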

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @id: pointer to device struct
 * @cq: pointer to control queue to be removed
 */
void
idpf_ctlq_remove (idpf_device_t *id, struct idpf_ctlq_info *cq)
{
  LIST_REMOVE (cq, cq_list);
  idpf_ctlq_shutdown (id, cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @vm: pointer to vlib main
 * @id: pointer to device struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int
idpf_ctlq_init (vlib_main_t *vm, idpf_device_t *id, u8 num_q,
		idpf_ctlq_create_info_t *q_info)
{
  struct idpf_ctlq_info *cq = NULL;
  int ret_code = IDPF_SUCCESS;
  int i = 0;

  LIST_INIT (&id->cq_list_head);

  for (i = 0; i < num_q; i++)
    {
      idpf_ctlq_create_info_t *qinfo = q_info + i;

      ret_code = idpf_ctlq_add (vm, id, qinfo, &cq);
      if (ret_code)
	goto init_destroy_qs;
    }

  return ret_code;

init_destroy_qs:
  LIST_FOR_EACH_ENTRY_SAFE (cq, NULL, &id->cq_list_head,
			    struct idpf_ctlq_info, cq_list)
    {
      idpf_ctlq_remove (id, cq);
    }

  return ret_code;
}
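
/*
 * Usage sketch for idpf_ctlq_init (hypothetical; a typical device brings up
 * one mailbox TX and one mailbox RX queue in a single call, with register
 * offsets filled in from the device's BAR layout):
 *
 *   idpf_ctlq_create_info_t q_info[2] = {
 *     { .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1, .len = 64,
 *       .buf_size = 4096 },
 *     { .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1, .len = 64,
 *       .buf_size = 4096 },
 *   };
 *
 *   if (idpf_ctlq_init (vm, id, 2, q_info))
 *     ;	// all previously added queues were already destroyed
 */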

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @id: pointer to device struct
 */
void
idpf_ctlq_deinit (idpf_device_t *id)
{
  struct idpf_ctlq_info *cq = NULL;

  LIST_FOR_EACH_ENTRY_SAFE (cq, NULL, &id->cq_list_head,
			    struct idpf_ctlq_info, cq_list)
    {
      idpf_ctlq_remove (id, cq);
    }
}

/**
 * idpf_ctlq_send - send command to Control Queue (CQ)
 * @id: pointer to device struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int
idpf_ctlq_send (idpf_device_t *id, struct idpf_ctlq_info *cq, u16 num_q_msg,
		idpf_ctlq_msg_t q_msg[])
{
  idpf_ctlq_desc_t *desc;
  int num_desc_avail = 0;
  int status = IDPF_SUCCESS;
  int i = 0;

  if (!cq || !cq->ring_size)
    return IDPF_ERR_CTLQ_EMPTY;

  clib_spinlock_lock (&cq->cq_lock);

  /* Ensure there are enough descriptors to send all messages */
  num_desc_avail = IDPF_CTLQ_DESC_UNUSED (cq);
  if (num_desc_avail == 0 || num_desc_avail < num_q_msg)
    {
      status = IDPF_ERR_CTLQ_FULL;
      goto sq_send_command_out;
    }

  for (i = 0; i < num_q_msg; i++)
    {
      idpf_ctlq_msg_t *msg = &q_msg[i];
      u64 msg_cookie;

      desc = IDPF_CTLQ_DESC (cq, cq->next_to_use);

      /* Pay attention to CPU_TO_LE16 */
      desc->opcode = msg->opcode;
      desc->pfid_vfid = msg->func_id;

      msg_cookie = msg->cookie.cookie;
      desc->cookie_high = IDPF_HI_DWORD (msg_cookie);
      desc->cookie_low = IDPF_LO_DWORD (msg_cookie);

      desc->flags = (msg->host_id & IDPF_HOST_ID_MASK)
		    << IDPF_CTLQ_FLAG_HOST_ID_S;
      if (msg->data_len)
	{
	  idpf_dma_mem_t *buff = msg->ctx.indirect.payload;

	  desc->datalen |= msg->data_len;
	  desc->flags |= IDPF_CTLQ_FLAG_BUF;
	  desc->flags |= IDPF_CTLQ_FLAG_RD;

	  /* Update the address values in the desc with the pa
	   * value for respective buffer
	   */
	  desc->params.indirect.addr_high = IDPF_HI_DWORD (buff->pa);
	  desc->params.indirect.addr_low = IDPF_LO_DWORD (buff->pa);

	  clib_memcpy (&desc->params, msg->ctx.indirect.context,
		       IDPF_INDIRECT_CTX_SIZE);
	}
      else
	{
	  clib_memcpy (&desc->params, msg->ctx.direct, IDPF_DIRECT_CTX_SIZE);
	}

      /* Store buffer info */
      cq->bi.tx_msg[cq->next_to_use] = msg;

      (cq->next_to_use)++;
      if (cq->next_to_use == cq->ring_size)
	cq->next_to_use = 0;
    }

  /* Force memory write to complete before letting hardware
   * know that there are new descriptors to fetch.
   */
  CLIB_MEMORY_BARRIER ();

  idpf_reg_write (id, cq->reg.tail, cq->next_to_use);

sq_send_command_out:
  clib_spinlock_unlock (&cq->cq_lock);

  return status;
}
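
/*
 * Send-path sketch (hypothetical caller; the opcode value is illustrative):
 * a command with an indirect DMA payload. The queue keeps a reference to
 * msg until idpf_ctlq_clean_sq () reclaims its descriptor.
 *
 *   idpf_ctlq_msg_t msg = { 0 };
 *   idpf_dma_mem_t *buff = ...;	// caller-allocated DMA buffer
 *
 *   msg.opcode = ...;		// mailbox opcode for the command
 *   msg.data_len = len;	// non-zero selects the indirect path
 *   msg.ctx.indirect.payload = buff;
 *
 *   status = idpf_ctlq_send (id, txq, 1, &msg);
 */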

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int
idpf_ctlq_clean_sq (struct idpf_ctlq_info *cq, u16 *clean_count,
		    idpf_ctlq_msg_t *msg_status[])
{
  idpf_ctlq_desc_t *desc;
  u16 i = 0, num_to_clean;
  u16 ntc, desc_err;
  int ret = IDPF_SUCCESS;

  if (!cq || !cq->ring_size)
    return IDPF_ERR_CTLQ_EMPTY;

  if (*clean_count == 0)
    return IDPF_SUCCESS;
  if (*clean_count > cq->ring_size)
    return IDPF_ERR_PARAM;

  clib_spinlock_lock (&cq->cq_lock);

  ntc = cq->next_to_clean;

  num_to_clean = *clean_count;

  for (i = 0; i < num_to_clean; i++)
    {
      /* Fetch next descriptor and check if marked as done */
      desc = IDPF_CTLQ_DESC (cq, ntc);
      if (!(desc->flags & IDPF_CTLQ_FLAG_DD))
	break;

      desc_err = desc->ret_val;
      if (desc_err)
	{
	  /* strip off FW internal code */
	  desc_err &= 0xff;
	}

      msg_status[i] = cq->bi.tx_msg[ntc];
      msg_status[i]->status = desc_err;

      cq->bi.tx_msg[ntc] = NULL;

      /* Zero out any stale data */
      clib_memset (desc, 0, sizeof (*desc));

      ntc++;
      if (ntc == cq->ring_size)
	ntc = 0;
    }

  cq->next_to_clean = ntc;

  clib_spinlock_unlock (&cq->cq_lock);

  /* Return number of descriptors actually cleaned */
  *clean_count = i;

  return ret;
}
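
/*
 * Clean-path sketch (hypothetical caller): reclaim a small batch of send
 * descriptors and inspect the per-message status.
 *
 *   idpf_ctlq_msg_t *done[8];
 *   u16 n = 8;			// in: how many to try to clean
 *
 *   idpf_ctlq_clean_sq (txq, &n, done);
 *   for (u16 j = 0; j < n; j++)	// out: how many were cleaned
 *     if (done[j]->status)
 *       ;			// command failed; free or retry its payload
 */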

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @id: pointer to device struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring
 * after consuming them; on return, buff_count holds the number of buffers
 * that could not be posted.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int
idpf_ctlq_post_rx_buffs (idpf_device_t *id, struct idpf_ctlq_info *cq,
			 u16 *buff_count, idpf_dma_mem_t **buffs)
{
  idpf_ctlq_desc_t *desc;
  u16 ntp = cq->next_to_post;
  bool buffs_avail = false;
  u16 tbp = ntp + 1;
  int status = IDPF_SUCCESS;
  int i = 0;

  if (*buff_count > cq->ring_size)
    return IDPF_ERR_PARAM;

  if (*buff_count > 0)
    buffs_avail = true;

  clib_spinlock_lock (&cq->cq_lock);

  if (tbp >= cq->ring_size)
    tbp = 0;

  if (tbp == cq->next_to_clean)
    /* Nothing to do */
    goto post_buffs_out;

  /* Post buffers for as many as provided or up until the last one used */
  while (ntp != cq->next_to_clean)
    {
      desc = IDPF_CTLQ_DESC (cq, ntp);

      if (cq->bi.rx_buff[ntp])
	goto fill_desc;
      if (!buffs_avail)
	{
	  /* If the caller hasn't given us any buffers or
	   * there are none left, search the ring itself
	   * for an available buffer to move to this
	   * entry starting at the next entry in the ring
	   */
	  tbp = ntp + 1;

	  /* Wrap ring if necessary */
	  if (tbp >= cq->ring_size)
	    tbp = 0;

	  while (tbp != cq->next_to_clean)
	    {
	      if (cq->bi.rx_buff[tbp])
		{
		  cq->bi.rx_buff[ntp] = cq->bi.rx_buff[tbp];
		  cq->bi.rx_buff[tbp] = NULL;

		  /* Found a buffer, no need to
		   * search anymore
		   */
		  break;
		}

	      /* Wrap ring if necessary */
	      tbp++;
	      if (tbp >= cq->ring_size)
		tbp = 0;
	    }

	  if (tbp == cq->next_to_clean)
	    goto post_buffs_out;
	}
      else
	{
	  /* Give back pointer to DMA buffer */
	  cq->bi.rx_buff[ntp] = buffs[i];
	  i++;

	  if (i >= *buff_count)
	    buffs_avail = false;
	}

    fill_desc:
      desc->flags = IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD;

      /* Post buffers to descriptor */
      desc->datalen = cq->bi.rx_buff[ntp]->size;
      desc->params.indirect.addr_high =
	IDPF_HI_DWORD (cq->bi.rx_buff[ntp]->pa);
      desc->params.indirect.addr_low = IDPF_LO_DWORD (cq->bi.rx_buff[ntp]->pa);

      ntp++;
      if (ntp == cq->ring_size)
	ntp = 0;
    }

post_buffs_out:
  /* Only update tail if buffers were actually posted */
  if (cq->next_to_post != ntp)
    {
      if (ntp)
	/* Update next_to_post to ntp - 1 since current ntp
	 * will not have a buffer
	 */
	cq->next_to_post = ntp - 1;
      else
	/* Wrap to end of ring since current ntp is 0 */
	cq->next_to_post = cq->ring_size - 1;

      idpf_reg_write (id, cq->reg.tail, cq->next_to_post);
    }

  clib_spinlock_unlock (&cq->cq_lock);

  /* return the number of buffers that were not posted */
  *buff_count = *buff_count - i;

  return status;
}
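
/*
 * Post-path sketch (hypothetical caller): hand the payload buffer taken by
 * idpf_ctlq_recv back to the ring. Per the note above, call this even with
 * zero buffers so direct commands keep the ring moving.
 *
 *   u16 nb = 1;
 *   idpf_dma_mem_t *bufs[1] = { q_msg[0].ctx.indirect.payload };
 *
 *   idpf_ctlq_post_rx_buffs (id, rxq, &nb, bufs);
 *   // nb now holds the number of buffers that could not be posted
 */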

/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int
idpf_ctlq_recv (struct idpf_ctlq_info *cq, u16 *num_q_msg,
		idpf_ctlq_msg_t *q_msg)
{
  u16 num_to_clean, ntc, ret_val, flags;
  idpf_ctlq_desc_t *desc;
  int ret_code = IDPF_SUCCESS;
  u16 i = 0;

  if (!cq || !cq->ring_size)
    return IDPF_ERR_CTLQ_EMPTY;

  if (*num_q_msg == 0)
    return IDPF_SUCCESS;
  else if (*num_q_msg > cq->ring_size)
    return IDPF_ERR_PARAM;

  /* Fixme: take the lock before we start messing with the ring */
  clib_spinlock_lock (&cq->cq_lock);

  ntc = cq->next_to_clean;

  num_to_clean = *num_q_msg;

  for (i = 0; i < num_to_clean; i++)
    {
      u64 msg_cookie;

      /* Fetch next descriptor and check if marked as done */
      desc = IDPF_CTLQ_DESC (cq, ntc);
      flags = desc->flags;

      if (!(flags & IDPF_CTLQ_FLAG_DD))
	break;

      ret_val = desc->ret_val;

      q_msg[i].vmvf_type =
	(flags & (IDPF_CTLQ_FLAG_FTYPE_VM | IDPF_CTLQ_FLAG_FTYPE_PF)) >>
	IDPF_CTLQ_FLAG_FTYPE_S;

      if (flags & IDPF_CTLQ_FLAG_ERR)
	ret_code = IDPF_ERR_CTLQ_ERROR;

      msg_cookie = (u64) desc->cookie_high << 32;
      msg_cookie |= (u64) desc->cookie_low;
      clib_memcpy_fast (&q_msg[i].cookie, &msg_cookie, sizeof (u64));

      q_msg[i].opcode = desc->opcode;
      q_msg[i].data_len = desc->datalen;
      q_msg[i].status = ret_val;

      if (desc->datalen)
	{
	  clib_memcpy_fast (q_msg[i].ctx.indirect.context,
			    &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

	  /* Assign pointer to dma buffer to ctlq_msg array
	   * to be given to upper layer
	   */
	  q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

	  /* Zero out pointer to DMA buffer info;
	   * will be repopulated by post buffers API
	   */
	  cq->bi.rx_buff[ntc] = NULL;
	}
      else
	{
	  clib_memcpy_fast (q_msg[i].ctx.direct, desc->params.raw,
			    IDPF_DIRECT_CTX_SIZE);
	}

      /* Zero out stale data in descriptor */
      clib_memset (desc, 0, sizeof (idpf_ctlq_desc_t));

      ntc++;
      if (ntc == cq->ring_size)
	ntc = 0;
    }

  cq->next_to_clean = ntc;

  clib_spinlock_unlock (&cq->cq_lock);

  *num_q_msg = i;
  if (*num_q_msg == 0)
    ret_code = IDPF_ERR_CTLQ_NO_WORK;

  return ret_code;
}
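
/*
 * End-to-end polling sketch (hypothetical service loop; error handling and
 * opcode dispatch elided):
 *
 *   idpf_ctlq_msg_t rx_msg;
 *   u16 n = 1;
 *
 *   while (idpf_ctlq_recv (rxq, &n, &rx_msg) == IDPF_SUCCESS)
 *     {
 *       // ... dispatch rx_msg by rx_msg.opcode ...
 *       u16 nb = rx_msg.data_len ? 1 : 0;
 *       idpf_dma_mem_t *b = rx_msg.ctx.indirect.payload;
 *       idpf_ctlq_post_rx_buffs (id, rxq, &nb, nb ? &b : NULL);
 *       n = 1;
 *     }
 */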