/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include "dpdk_priv.h"
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
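/*
 * For reference, the foreach_dpdk_tx_func_error X-macro above expands once
 * into the enum and once into the string table, keeping the two in sync.
 * The preprocessed result is equivalent to:
 *
 *   typedef enum {
 *     DPDK_TX_FUNC_ERROR_BAD_RETVAL,
 *     DPDK_TX_FUNC_ERROR_RING_FULL,
 *     DPDK_TX_FUNC_ERROR_PKT_DROP,
 *     DPDK_TX_FUNC_ERROR_REPL_FAIL,
 *     DPDK_TX_FUNC_N_ERROR,
 *   } dpdk_tx_func_error_t;
 *
 *   static char *dpdk_tx_func_error_strings[] = {
 *     "DPDK tx function returned an error",
 *     "Tx packet drops (ring full)",
 *     "Tx packet drops (dpdk tx failure)",
 *     "Tx packet drops (replication failure)",
 *   };
 */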
clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
					    (struct ether_addr *) address);
  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);
  return 0;
}
clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
		    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);
  if (error)
    return clib_error_return (0, "mc addr list failed: %d", error);
  return 0;
}
struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_main_t *bm = vm->buffer_main;
  struct rte_mbuf *first_mb = 0, *new_mb, *pkt_mb, **prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer (b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE (pkt_mb == 0))
	{
	  clib_warning ("Missing %d mbuf chain segment(s): "
			"(nb_segs = %d, nb_segs_left = %d)!",
			nb_segs - nb_segs_left, nb_segs, nb_segs_left);
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
	{
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
	{
	  first_mb = new_mb;
	  rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
	  first_mb->nb_segs = pkt_mb->nb_segs;
	  first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME /* TX offload support TBD */
	  first_mb->vlan_macip = pkt_mb->vlan_macip;
	  first_mb->hash = pkt_mb->hash;
	  first_mb->ol_flags = pkt_mb->ol_flags;
#endif
	}
      else
	{
	  ASSERT (prev_mb_next != 0);
	  *prev_mb_next = new_mb;
	}

      /*
       * Copy packet segment data into new mbuf segment.
       */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;
      copy_bytes = pkt_mb->data_len + RTE_PKTMBUF_HEADROOM;
      ASSERT (copy_bytes <= pkt_mb->buf_len);
      clib_memcpy (new_mb->buf_addr, pkt_mb->buf_addr, copy_bytes);

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  ASSERT (pkt_mb == 0);
  __rte_mbuf_sanity_check (first_mb, 1);

  return first_mb;
}
struct rte_mbuf *
dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_main_t *bm = vm->buffer_main;
  struct rte_mbuf *first_mb = 0, *new_mb, *pkt_mb, **prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer (b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE (pkt_mb == 0))
	{
	  clib_warning ("Missing %d mbuf chain segment(s): "
			"(nb_segs = %d, nb_segs_left = %d)!",
			nb_segs - nb_segs_left, nb_segs, nb_segs_left);
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}
      new_mb = rte_pktmbuf_clone (pkt_mb, bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
	{
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
	{
	  first_mb = new_mb;
	  rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
	  first_mb->nb_segs = pkt_mb->nb_segs;
	  first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME /* TX offload support TBD */
	  first_mb->vlan_macip = pkt_mb->vlan_macip;
	  first_mb->hash = pkt_mb->hash;
	  first_mb->ol_flags = pkt_mb->ol_flags;
#endif
	}
      else
	{
	  ASSERT (prev_mb_next != 0);
	  *prev_mb_next = new_mb;
	}

      /*
       * Attach the cloned segment; the clone references the original
       * segment's data, so no copy is needed.
       */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  ASSERT (pkt_mb == 0);
  __rte_mbuf_sanity_check (first_mb, 1);

  return first_mb;
}
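/*
 * Design note: dpdk_replicate_packet_mb() above makes a deep copy (fresh
 * mbufs plus a clib_memcpy of every segment), so the replica is fully
 * independent of the original buffer. dpdk_zerocopy_replicate_packet_mb()
 * instead uses rte_pktmbuf_clone(), which allocates indirect mbufs that
 * reference the original segments and bump their reference counts: cheaper,
 * but the original data must remain untouched until every clone is freed.
 */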
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
		      vlib_node_runtime_t * node,
		      dpdk_device_t * xd,
		      u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_dma_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer,
	       sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
	       sizeof (t0->buffer.pre_data));
}
/*
 * This function calls DPDK's tx_burst function to transmit the packets on
 * the tx_vector. It manages a per-device lock if the device does not
 * support multiple TX queues. It returns the number of packets left
 * untransmitted on the tx_vector. If all packets are transmitted (the
 * normal case), the function returns 0.
 *
 * The tx_burst function may not be able to transmit all packets because
 * the dpdk ring is full. If a flowcontrol callback function has been
 * configured, then the function simply returns. If no callback has been
 * configured, the function retries calling tx_burst with the remaining
 * packets. This continues until all packets are transmitted or tx_burst
 * indicates no packets could be transmitted (the caller can then drop the
 * remaining packets).
 *
 * The function assumes there is at least one packet on the tx_vector.
 */
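/*
 * A minimal sketch (not called anywhere) of the ring-index arithmetic used
 * below: tx_head and tx_tail are free-running u16 counters, the occupancy
 * is their difference, and the physical slot is the counter modulo
 * DPDK_TX_RING_SIZE. A burst that would cross the end of the ring is split
 * into two calls:
 *
 *   u16 n_packets = ring->tx_head - ring->tx_tail;
 *   u32 tx_head = ring->tx_head % DPDK_TX_RING_SIZE;
 *   u32 tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;
 *   u32 n_slots = (tx_head > tx_tail)
 *     ? tx_head - tx_tail              // contiguous: one burst
 *     : DPDK_TX_RING_SIZE - tx_tail;   // wrapped: burst to end, then retry
 */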
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
				dpdk_device_t * xd,
				struct rte_mbuf **tx_vector)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header (tx_vector, sizeof (*ring));

  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % DPDK_TX_RING_SIZE;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT (n_packets > 0);

  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest
   * un-flowed-off burst from a traffic manager. A larger size also helps
   * performance a bit because it decreases the probability of having to
   * issue two tx_burst calls due to a ring wrap.
   */
  ASSERT (n_packets < DPDK_TX_RING_SIZE);

  /*
   * If there is no flowcontrol callback, there is only temporary buffering
   * on the tx_vector and so the tail should always be 0.
   */
  ASSERT (dm->flowcontrol_callback || ring->tx_tail == 0);

  /*
   * If there is a flowcontrol callback, don't retry any incomplete tx_bursts.
   * Apply backpressure instead. If there is no callback, keep retrying until
   * a tx_burst sends no packets. An n_retry of 255 essentially means no
   * retry limit.
   */
  n_retry = dm->flowcontrol_callback ? 0 : 255;

  queue_id = vm->cpu_index;
  do
    {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;

      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0 &&
			 xd->lockp != 0))
	{
	  queue_id = queue_id % xd->tx_q_used;
	  while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
	    /* zzzz */
	    queue_id = (queue_id + 1) % xd->tx_q_used;
	}

      if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
	{
	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      /* no wrap, transmit in one burst */
	      rv = rte_eth_tx_burst (xd->device_index,
				     (uint16_t) queue_id,
				     &tx_vector[tx_tail],
				     (uint16_t) (tx_head - tx_tail));
	    }
	  else
	    {
	      /*
	       * This can only happen if there is a flowcontrol callback.
	       * We need to split the transmit into two calls: one for
	       * the packets up to the wrap point, and one to continue
	       * at the start of the ring.
	       * Transmit pkts up to the wrap point.
	       */
	      rv = rte_eth_tx_burst (xd->device_index,
				     (uint16_t) queue_id,
				     &tx_vector[tx_tail],
				     (uint16_t) (DPDK_TX_RING_SIZE -
						 tx_tail));

	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }
	}
      else if (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER)
	{
	  u32 offset = 0;
	  if (xd->need_txlock)
	    {
	      queue_id = 0;
	      while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
	    }
	  else
	    {
	      dpdk_device_and_queue_t *dq;
	      vec_foreach (dq, dm->devices_by_cpu[vm->cpu_index])
	      {
		if (xd->device_index == dq->device)
		  break;
	      }
	      ASSERT (dq);
	      offset = dq->queue_id * VIRTIO_QNUM;
	    }

	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      int i;
	      u32 bytes = 0;
	      struct rte_mbuf **pkts = &tx_vector[tx_tail];
	      for (i = 0; i < (tx_head - tx_tail); i++)
		{
		  struct rte_mbuf *buff = pkts[i];
		  bytes += rte_pktmbuf_data_len (buff);
		}

	      /* no wrap, transmit in one burst */
	      rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev,
					    offset + VIRTIO_RXQ,
					    &tx_vector[tx_tail],
					    (uint16_t) (tx_head - tx_tail));
	      if (PREDICT_TRUE (rv > 0))
		{
		  dpdk_vu_vring *vring =
		    &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
		  vring->packets += rv;
		  vring->bytes += bytes;

		  if (dpdk_vhost_user_want_interrupt
		      (xd, offset + VIRTIO_RXQ))
		    {
		      vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
		      vring->n_since_last_int += rv;

		      f64 now = vlib_time_now (vm);
		      if (vring->int_deadline < now ||
			  vring->n_since_last_int >
			  dm->conf->vhost_coalesce_frames)
			dpdk_vhost_user_send_interrupt (vm, xd,
							offset + VIRTIO_RXQ);
		    }

		  int c = rv;
		  while (c--)
		    rte_pktmbuf_free (tx_vector[tx_tail + c]);
		}
	    }
	  else
	    {
	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      int i;
	      u32 bytes = 0;
	      struct rte_mbuf **pkts = &tx_vector[tx_tail];
	      for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++)
		{
		  struct rte_mbuf *buff = pkts[i];
		  bytes += rte_pktmbuf_data_len (buff);
		}
	      rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev,
					    offset + VIRTIO_RXQ,
					    &tx_vector[tx_tail],
					    (uint16_t) (DPDK_TX_RING_SIZE -
							tx_tail));

	      if (PREDICT_TRUE (rv > 0))
		{
		  dpdk_vu_vring *vring =
		    &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
		  vring->packets += rv;
		  vring->bytes += bytes;

		  if (dpdk_vhost_user_want_interrupt
		      (xd, offset + VIRTIO_RXQ))
		    {
		      vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
		      vring->n_since_last_int += rv;

		      f64 now = vlib_time_now (vm);
		      if (vring->int_deadline < now ||
			  vring->n_since_last_int >
			  dm->conf->vhost_coalesce_frames)
			dpdk_vhost_user_send_interrupt (vm, xd,
							offset + VIRTIO_RXQ);
		    }

		  int c = rv;
		  while (c--)
		    rte_pktmbuf_free (tx_vector[tx_tail + c]);
		}

	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }

	  if (xd->need_txlock)
	    *xd->lockp[queue_id] = 0;
	}
      else if (xd->flags & DPDK_DEVICE_FLAG_KNI)
	{
	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      /* no wrap, transmit in one burst */
	      rv = rte_kni_tx_burst (xd->kni,
				     &tx_vector[tx_tail],
				     (uint16_t) (tx_head - tx_tail));
	    }
	  else
	    {
	      /*
	       * This can only happen if there is a flowcontrol callback.
	       * We need to split the transmit into two calls: one for
	       * the packets up to the wrap point, and one to continue
	       * at the start of the ring.
	       * Transmit pkts up to the wrap point.
	       */
	      rv = rte_kni_tx_burst (xd->kni,
				     &tx_vector[tx_tail],
				     (uint16_t) (DPDK_TX_RING_SIZE -
						 tx_tail));

	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }
	}
      else
	{
	  ASSERT (0);
	  rv = 0;
	}
      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0 &&
			 xd->lockp != 0))
	*xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE (rv < 0))
	{
	  /* emit non-fatal message, bump counter */
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->vlib_hw_if_index)->tx_node_index;

	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index,
			rv);
	  return n_packets;	/* untransmitted packets */
	}
      ring->tx_tail += (u16) rv;
      n_packets -= (uint16_t) rv;
    }
  while (rv && n_packets && (n_retry > 0));

  return n_packets;
}
/*
 * This function transmits any packets on the interface's tx_vector and
 * returns the number of packets untransmitted on the tx_vector. If the
 * tx_vector is empty, the function simply returns 0.
 *
 * It is intended to be called by a traffic manager which has flowed-off an
 * interface, to see if the interface can be flowed-on again.
 */
u32
dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  int queue_id;
  struct rte_mbuf **tx_vector;
  tx_ring_hdr_t *ring;

  /* param is dev_instance and not hw_if_index to save another lookup */
  xd = vec_elt_at_index (dm->devices, dev_instance);

  queue_id = vm->cpu_index;
  tx_vector = xd->tx_vectors[queue_id];

  /* If no packets on the ring, don't bother calling tx function */
  ring = vec_header (tx_vector, sizeof (*ring));
  if (ring->tx_head == ring->tx_tail)
    return 0;

  return tx_burst_vector_internal (vm, xd, tx_vector);
}
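/*
 * Usage sketch (hypothetical traffic-manager code, not part of this file):
 * after flowing-off an interface, poll it until the ring drains and then
 * resume dequeues:
 *
 *   u32 n_left = dpdk_interface_tx_vector (vm, dev_instance);
 *   if (n_left == 0)
 *     tm_flow_on_interface (dev_instance);   // hypothetical helper
 */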
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to
 * tx_burst_vector_internal(), which calls the dpdk tx_burst function.
 *
 * The tx_vector is treated slightly differently depending on whether or
 * not a flowcontrol callback function has been configured. If there is no
 * callback, the tx_vector is a temporary array of rte_mbuf packet pointers.
 * Its entries are written and consumed before the function exits.
 *
 * If there is a callback, then the transmit is being invoked in the presence
 * of a traffic manager. Here the tx_vector is treated like a ring of rte_mbuf
 * pointers. If not all packets can be transmitted, the untransmitted packets
 * stay on the tx_vector until the next call. The callback allows the traffic
 * manager to flow-off dequeues to the interface. The companion function
 * dpdk_interface_tx_vector() allows the traffic manager to detect when
 * it should flow-on the interface again.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  struct rte_mbuf **tx_vector;
  int i;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->cpu_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header (tx_vector, sizeof (*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  if (PREDICT_FALSE (n_on_ring + n_packets > DPDK_TX_RING_SIZE))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
			n_packets);

      while (n_packets--)
	{
	  u32 bi0 = from[n_packets];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
	  rte_pktmbuf_free (mb0);
	}
      return f->n_vectors;
    }

  if (PREDICT_FALSE (dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
	{
	  u32 bi0 = from[0];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  if (dm->pcap_sw_if_index == 0 ||
	      dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
	  from++;
	  n_left--;
	}
    }

  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % DPDK_TX_RING_SIZE;
  while (n_left >= 4)
    {
      u32 bi0, bi1;
      u32 pi0, pi1;
      struct rte_mbuf *mb0, *mb1;
      struct rte_mbuf *prefmb0, *prefmb1;
      vlib_buffer_t *b0, *b1;
      vlib_buffer_t *pref0, *pref1;
      i16 delta0, delta1;
      u16 new_data_len0, new_data_len1;
      u16 new_pkt_len0, new_pkt_len1;
      u32 any_clone;

      pi0 = from[2];
      pi1 = from[3];
      pref0 = vlib_get_buffer (vm, pi0);
      pref1 = vlib_get_buffer (vm, pi1);

      prefmb0 = rte_mbuf_from_vlib_buffer (pref0);
      prefmb1 = rte_mbuf_from_vlib_buffer (pref1);

      CLIB_PREFETCH (prefmb0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (pref0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (prefmb1, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (pref1, CLIB_CACHE_LINE_BYTES, LOAD);

      bi0 = from[0];
      bi1 = from[1];
      from += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);

      any_clone = (b0->flags & VLIB_BUFFER_RECYCLE)
	| (b1->flags & VLIB_BUFFER_RECYCLE);
      if (PREDICT_FALSE (any_clone != 0))
	{
	  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_RECYCLE) != 0))
	    {
	      struct rte_mbuf *mb0_new = dpdk_replicate_packet_mb (b0);
	      if (PREDICT_FALSE (mb0_new == 0))
		{
		  vlib_error_count (vm, node->node_index,
				    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
		  b0->flags |= VLIB_BUFFER_REPL_FAIL;
		}
	      else
		mb0 = mb0_new;
	      vec_add1 (dm->recycle[my_cpu], bi0);
	    }
	  if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_RECYCLE) != 0))
	    {
	      struct rte_mbuf *mb1_new = dpdk_replicate_packet_mb (b1);
	      if (PREDICT_FALSE (mb1_new == 0))
		{
		  vlib_error_count (vm, node->node_index,
				    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
		  b1->flags |= VLIB_BUFFER_REPL_FAIL;
		}
	      else
		mb1 = mb1_new;
	      vec_add1 (dm->recycle[my_cpu], bi1);
	    }
	}

      delta0 = PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
      delta1 = PREDICT_FALSE (b1->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b1) - (i16) mb1->pkt_len;

      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_data_len1 = (u16) ((i16) mb1->data_len + delta1);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);
      new_pkt_len1 = (u16) ((i16) mb1->pkt_len + delta1);

      b0->current_length = new_data_len0;
      b1->current_length = new_data_len1;
      mb0->data_len = new_data_len0;
      mb1->data_len = new_data_len1;
      mb0->pkt_len = new_pkt_len0;
      mb1->pkt_len = new_pkt_len1;

      mb0->data_off = (PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb0->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b0->current_data);
      mb1->data_off = (PREDICT_FALSE (b1->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb1->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b1->current_data);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
	  if (b1->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
	}

      if (PREDICT_TRUE (any_clone == 0))
	{
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	  i++;
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
	  i++;
	}
      else
	{
	  /* cloning was done, need to check for failure */
	  if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    {
	      tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	      i++;
	    }
	  if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    {
	      tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
	      i++;
	    }
	}

      n_left -= 2;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf *mb0;
      vlib_buffer_t *b0;
      i16 delta0;
      u16 new_data_len0;
      u16 new_pkt_len0;

      bi0 = from[0];
      from++;

      b0 = vlib_get_buffer (vm, bi0);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_RECYCLE) != 0))
	{
	  struct rte_mbuf *mb0_new = dpdk_replicate_packet_mb (b0);
	  if (PREDICT_FALSE (mb0_new == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
	      b0->flags |= VLIB_BUFFER_REPL_FAIL;
	    }
	  else
	    mb0 = mb0_new;
	  vec_add1 (dm->recycle[my_cpu], bi0);
	}

      delta0 = PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;

      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);

      b0->current_length = new_data_len0;
      mb0->data_len = new_data_len0;
      mb0->pkt_len = new_pkt_len0;
      mb0->data_off = (PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb0->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b0->current_data);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b0->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	{
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	  i++;
	}
      n_left--;
    }

  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted.
   * This is the number originally on the ring minus the number remaining
   * on the ring.
   */
  tx_pkts = n_on_ring - n_packets;

  if (PREDICT_FALSE (dm->flowcontrol_callback != 0))
    {
      if (PREDICT_FALSE (n_packets))
	{
	  /* Callback may want to enable flowcontrol */
	  dm->flowcontrol_callback (vm, xd->vlib_hw_if_index,
				    ring->tx_head - ring->tx_tail);
	}
      else
	{
	  /* Reset head/tail to avoid unnecessary wrap */
	  ring->tx_head = 0;
	  ring->tx_tail = 0;
	}
    }
  else
    {
      /* If there is no callback then drop any non-transmitted packets */
      if (PREDICT_FALSE (n_packets))
	{
	  vlib_simple_counter_main_t *cm;
	  vnet_main_t *vnm = vnet_get_main ();

	  cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
				 VNET_INTERFACE_COUNTER_TX_ERROR);

	  vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
					 n_packets);

	  vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			    n_packets);

	  while (n_packets--)
	    rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
	}

      /* Reset head/tail to avoid unnecessary wrap */
      ring->tx_head = 0;
      ring->tx_tail = 0;
    }

  /* Recycle replicated buffers */
  if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu],
			vec_len (dm->recycle[my_cpu]));
      _vec_len (dm->recycle[my_cpu]) = 0;
    }

  ASSERT (ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}
static int
dpdk_device_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!xd || (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0)
    {
      clib_warning
	("cannot renumber non-vhost-user interface (sw_if_index: %d)",
	 hi->sw_if_index);
      return 0;
    }

  xd->vu_if_id = new_dev_instance;
  return 0;
}
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * Set the "last_cleared_stats" to the current stats, so that
   * things appear to clear from a display perspective.
   */
  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
  clib_memcpy (xd->last_cleared_xstats, xd->xstats,
	       vec_len (xd->last_cleared_xstats) *
	       sizeof (xd->last_cleared_xstats[0]));

  if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER))
    {
      int i;
      for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++)
	{
	  xd->vu_intf->vrings[i].packets = 0;
	  xd->vu_intf->vrings[i].bytes = 0;
	}
    }
}
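/*
 * Display code is then expected to subtract the snapshot from the live
 * counters; a sketch of the convention (the actual format function lives
 * elsewhere):
 *
 *   u64 shown = xd->stats.ipackets - xd->last_cleared_stats.ipackets;
 */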
#ifdef RTE_LIBRTE_KNI
static int
kni_config_network_if (u8 port_id, u8 if_up)
{
  vnet_main_t *vnm = vnet_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword *p;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0)
    {
      clib_warning ("unknown interface");
      return 0;
    }
  else
    {
      xd = vec_elt_at_index (dm->devices, p[0]);
    }

  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
			       if_up ? VNET_HW_INTERFACE_FLAG_LINK_UP |
			       ETH_LINK_FULL_DUPLEX : 0);
  return 0;
}
static int
kni_change_mtu (u8 port_id, unsigned new_mtu)
{
  vnet_main_t *vnm = vnet_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword *p;
  vnet_hw_interface_t *hif;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0)
    {
      clib_warning ("unknown interface");
      return 0;
    }
  else
    {
      xd = vec_elt_at_index (dm->devices, p[0]);
    }
  hif = vnet_get_hw_interface (vnm, xd->vlib_hw_if_index);

  hif->max_packet_bytes = new_mtu;

  return 0;
}
#endif
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

#ifdef RTE_LIBRTE_KNI
  if (xd->flags & DPDK_DEVICE_FLAG_KNI)
    {
      if (is_up)
	{
	  struct rte_kni_conf conf;
	  struct rte_kni_ops ops;
	  vlib_main_t *vm = vlib_get_main ();
	  vlib_buffer_main_t *bm = vm->buffer_main;
	  memset (&conf, 0, sizeof (conf));
	  snprintf (conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id);
	  conf.mbuf_size = VLIB_BUFFER_DATA_SIZE;
	  memset (&ops, 0, sizeof (ops));
	  ops.port_id = xd->kni_port_id;
	  ops.change_mtu = kni_change_mtu;
	  ops.config_network_if = kni_config_network_if;

	  xd->kni =
	    rte_kni_alloc (bm->pktmbuf_pools[rte_socket_id ()], &conf, &ops);
	  if (!xd->kni)
	    {
	      clib_warning ("failed to allocate kni interface");
	    }
	  else
	    {
	      hif->max_packet_bytes = 1500;	/* kni interface default value */
	      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
	    }
	}
      else
	{
	  int kni_rv;

	  xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
	  kni_rv = rte_kni_release (xd->kni);
	  if (kni_rv < 0)
	    clib_warning ("rte_kni_release returned %d", kni_rv);
	}
      return 0;
    }
#endif
  if (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER)
    {
      if (is_up)
	{
	  if (xd->vu_is_running)
	    vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
					 VNET_HW_INTERFACE_FLAG_LINK_UP |
					 ETH_LINK_FULL_DUPLEX);
	  xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
	}
      else
	{
	  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
	  xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
	}
      return 0;
    }

  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	rv = rte_eth_dev_start (xd->device_index);

      if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
	rte_eth_promiscuous_enable (xd->device_index);
      else
	rte_eth_promiscuous_disable (xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
      rte_eth_dev_stop (xd->device_index);

      /* For bonded interface, stop slave links */
      if (xd->pmd == VNET_DPDK_PMD_BOND)
	{
	  u8 slink[16];
	  int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
	  while (nlink >= 1)
	    {
	      u8 dpdk_port = slink[--nlink];
	      rte_eth_dev_stop (dpdk_port);
	    }
	}
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node.
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
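/*
 * Usage sketch: this hook is normally reached through the generic interface
 * layer rather than called directly, e.g.:
 *
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index,
 *                                          my_node.index);    // redirect
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index, ~0);  // undo
 *
 * where my_node is a hypothetical graph node registered elsewhere.
 */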
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->vlan_subifs;

  if (is_add)
    xd->vlan_subifs++;
  else if (xd->vlan_subifs)
    xd->vlan_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    return 0;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    return 0;

  if (t->sub.eth.flags.no_tags == 1)
    return 0;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->vlan_subifs = prev_subifs;
      return clib_error_return (0, "unsupported VLAN setup");
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
    {
      xd->vlan_subifs = prev_subifs;
      return clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
				xd->device_index, r);
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
				is_add)))
    {
      xd->vlan_subifs = prev_subifs;
      return clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
				xd->device_index, r);
    }

  return 0;
}
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .no_flatten_output_chains = 1,
  .name_renumber = dpdk_device_renumber,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx)
void
dpdk_set_flowcontrol_callback (vlib_main_t * vm,
			       dpdk_flowcontrol_callback_t callback)
{
  dpdk_main.flowcontrol_callback = callback;
}
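/*
 * Usage sketch (hypothetical traffic manager, not part of this file):
 * registering a callback switches dpdk_interface_tx() from drop-on-full to
 * backpressure mode. The callback signature follows how the callback is
 * invoked from dpdk_interface_tx() above:
 *
 *   static void
 *   tm_flowcontrol_cb (vlib_main_t * vm, u32 hw_if_index, u32 n_on_ring)
 *   {
 *     // flow-off dequeues to hw_if_index; poll with
 *     // dpdk_interface_tx_vector() to decide when to flow-on again
 *   }
 *
 *   dpdk_set_flowcontrol_callback (vm, tm_flowcontrol_cb);
 */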
#define UP_DOWN_FLAG_EVENT 1

u32
dpdk_get_admin_up_down_in_progress (void)
{
  return dpdk_main.admin_up_down_in_progress;
}
static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  {
	    if (vec_len (event_data) == 2)
	      {
		sw_if_index = event_data[0];
		flags = event_data[1];
		error =
		  vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
					       flags);
		clib_error_report (error);
	      }
	  }
	  break;
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
    .function = admin_up_down_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "admin-up-down-process",
    .process_log2_n_stack_bytes = 17,	/* 2^17 = 128 KB stack */
};
/*
 * Asynchronously invoke vnet_sw_interface_set_flags via the admin_up_down
 * process. Useful for avoiding long blocking delays (>150ms) in the dpdk
 * drivers.
 *
 * WARNING: when posting this event, no other interface-related calls should
 * be made (e.g. vnet_create_sw_interface()) while the event is being
 * processed (admin_up_down_in_progress). This is required in order to avoid
 * race conditions in manipulating interface data structures.
 */
void
post_sw_interface_set_flags (vlib_main_t * vm, u32 sw_if_index, u32 flags)
{
  uword *d = vlib_process_signal_event_data
    (vm, admin_up_down_process_node.index,
     UP_DOWN_FLAG_EVENT, 2, sizeof (u32));
  d[0] = sw_if_index;
  d[1] = flags;
}
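/*
 * Usage sketch: bring an interface up without blocking the caller on a slow
 * PMD start (see dpdk_interface_admin_up_down() above):
 *
 *   post_sw_interface_set_flags (vm, sw_if_index,
 *                                VNET_SW_INTERFACE_FLAG_ADMIN_UP);
 *
 * Callers should check dpdk_get_admin_up_down_in_progress() and avoid other
 * interface-related calls until it returns 0.
 */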
/*
 * Return a copy of the DPDK port stats in dest.
 */
clib_error_t *
dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats *dest)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!dest)
    return clib_error_return (0, "Missing or NULL argument");
  if (!xd)
    return clib_error_return (0,
			      "Unable to get DPDK device from HW interface");

  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (dest, &xd->stats, sizeof (xd->stats));
  return (0);
}
/*
 * Return the number of dpdk mbufs
 */
u32
dpdk_num_mbufs (void)
{
  dpdk_main_t *dm = &dpdk_main;

  return dm->conf->num_mbufs;
}
/*
 * Return the pmd type for a given hardware interface
 */
dpdk_pmd_t
dpdk_get_pmd_type (vnet_hw_interface_t * hi)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;

  ASSERT (hi);

  xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  ASSERT (xd);

  return xd->pmd;
}
/*
 * Return the cpu socket for a given hardware interface
 */
i8
dpdk_get_cpu_socket (vnet_hw_interface_t * hi)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;

  ASSERT (hi);

  xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  ASSERT (xd);

  return xd->cpu_socket;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */