/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include "dpdk_priv.h"
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")
typedef enum {
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
  DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;
static char * dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
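
/*
 * Set the MAC address of a DPDK device, delegating to the PMD.
 * Returns a clib error if the PMD rejects the address.
 */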
clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char * address)
{
  int error;
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set(xd->device_index,
                                           (struct ether_addr *) address);

  if (error) {
    return clib_error_return (0, "mac address set failed: %d", error);
  } else {
    return NULL;
  }
}
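
/*
 * Replace the device's multicast filter list with mc_addr_vec (naddr
 * entries), delegating to the PMD.
 */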
clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
                    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list(xd->device_index, mc_addr_vec, naddr);

  if (error) {
    return clib_error_return (0, "mc addr list failed: %d", error);
  } else {
    return NULL;
  }
}
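
/*
 * Make a full copy of an mbuf chain so the copy can be handed to the PMD
 * while the original (e.g. a cloned/replicated vlib buffer) stays owned
 * by vlib. Returns the head of the new chain, or NULL if a chain segment
 * is missing or a pool allocation fails; the caller is expected to drop
 * the packet in that case.
 */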
struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer(b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE(pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          rte_pktmbuf_free(first_mb);
          return NULL;
        }

      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE(new_mb == 0))
        {
          rte_pktmbuf_free(first_mb);
          return NULL;
        }

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else
        {
          ASSERT(prev_mb_next != 0);
          *prev_mb_next = new_mb;
        }

      /*
       * Copy packet segment data into new mbuf segment.
       */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;
      copy_bytes = pkt_mb->data_len + RTE_PKTMBUF_HEADROOM;
      ASSERT(copy_bytes <= pkt_mb->buf_len);
      clib_memcpy(new_mb->buf_addr, pkt_mb->buf_addr, copy_bytes);

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  ASSERT(pkt_mb == 0);
  __rte_mbuf_sanity_check(first_mb, 1);

  return first_mb;
}
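
/*
 * Zero-copy variant of dpdk_replicate_packet_mb(): each segment is
 * rte_pktmbuf_clone()'d instead of copied, so the new chain shares the
 * underlying data buffers with the original chain.
 */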
struct rte_mbuf * dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  unsigned socket_id = rte_socket_id();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer(b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE(pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          rte_pktmbuf_free(first_mb);
          return NULL;
        }

      new_mb = rte_pktmbuf_clone(pkt_mb, bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE(new_mb == 0))
        {
          rte_pktmbuf_free(first_mb);
          return NULL;
        }

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else
        {
          ASSERT(prev_mb_next != 0);
          *prev_mb_next = new_mb;
        }

      /*
       * Propagate the segment length; the clone shares the original's data.
       */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  ASSERT(pkt_mb == 0);
  __rte_mbuf_sanity_check(first_mb, 1);

  return first_mb;
}
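
/*
 * Record a tx trace entry for this buffer: queue and device indices, a
 * snapshot of the rte_mbuf header, and the leading bytes of packet data.
 */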
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
                      vlib_node_runtime_t * node,
                      dpdk_device_t * xd,
                      u16 queue_id,
                      u32 buffer_index,
                      vlib_buffer_t * buffer)
{
  vlib_main_t * vm = vlib_get_main();
  dpdk_tx_dma_trace_t * t0;
  struct rte_mbuf * mb;

  mb = rte_mbuf_from_vlib_buffer(buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer, sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
               sizeof (t0->buffer.pre_data));
}
/*
 * This function calls the dpdk's tx_burst function to transmit the packets
 * on the tx_vector. It manages a lock per-device if the device does not
 * support multiple queues. It returns the number of packets untransmitted
 * on the tx_vector. If all packets are transmitted (the normal case), the
 * function returns 0.
 *
 * The tx_burst function may not be able to transmit all packets because the
 * dpdk ring is full. If a flowcontrol callback function has been configured
 * then the function simply returns. If no callback has been configured, the
 * function will retry calling tx_burst with the remaining packets. This will
 * continue until all packets are transmitted or tx_burst indicates no packets
 * could be transmitted. (The caller can drop the remaining packets.)
 *
 * The function assumes there is at least one packet on the tx_vector.
 */
static_always_inline
u32 tx_burst_vector_internal (vlib_main_t * vm,
                              dpdk_device_t * xd,
                              struct rte_mbuf ** tx_vector)
{
  dpdk_main_t * dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header(tx_vector, sizeof(*ring));

  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % DPDK_TX_RING_SIZE;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT(n_packets > 0);
  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest un-flowed
   * off burst from a traffic manager. A larger size also helps performance
   * a bit because it decreases the probability of having to issue two tx_burst
   * calls due to a ring wrap.
   */
  ASSERT(n_packets < DPDK_TX_RING_SIZE);
  /*
   * If there is no flowcontrol callback, there is only temporary buffering
   * on the tx_vector and so the tail should always be 0.
   */
  ASSERT(dm->flowcontrol_callback || ring->tx_tail == 0);
  /*
   * If there is a flowcontrol callback, don't retry any incomplete tx_bursts.
   * Apply backpressure instead. If there is no callback, keep retrying until
   * a tx_burst sends no packets. An n_retry of 255 effectively means no retry
   * limit.
   */
  n_retry = dm->flowcontrol_callback ? 0 : 255;
  queue_id = vm->cpu_index;

  do {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;
      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE(xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
                        xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
          while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
            /* zzzz */
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }
      if (PREDICT_TRUE(xd->dev_type == VNET_DPDK_DEV_ETH))
        {
          if (PREDICT_TRUE(tx_head > tx_tail))
            {
              /* no wrap, transmit in one burst */
              rv = rte_eth_tx_burst(xd->device_index,
                                    (uint16_t) queue_id,
                                    &tx_vector[tx_tail],
                                    (uint16_t) (tx_head-tx_tail));
            }
          else
            {
              /*
               * This can only happen if there is a flowcontrol callback.
               * We need to split the transmit into two calls: one for
               * the packets up to the wrap point, and one to continue
               * at the start of the ring.
               * Transmit pkts up to the wrap point.
               */
              rv = rte_eth_tx_burst(xd->device_index,
                                    (uint16_t) queue_id,
                                    &tx_vector[tx_tail],
                                    (uint16_t) (DPDK_TX_RING_SIZE - tx_tail));

              /*
               * If we transmitted everything we wanted, then allow 1 retry
               * so we can try to transmit the rest. If we didn't transmit
               * everything, stop now.
               */
              n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
            }
        }
      else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
        {
          u32 offset = 0;
          if (xd->need_txlock) {
            queue_id = 0;
            while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
          }
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
          else {
            dpdk_device_and_queue_t * dq;
            vec_foreach (dq, dm->devices_by_cpu[vm->cpu_index])
              {
                if (xd->device_index == dq->device)
                  break;
              }
            offset = dq->queue_id * VIRTIO_QNUM;
          }
#endif
          if (PREDICT_TRUE(tx_head > tx_tail))
            {
              int i; u32 bytes = 0;
              struct rte_mbuf **pkts = &tx_vector[tx_tail];
              for (i = 0; i < (tx_head - tx_tail); i++) {
                struct rte_mbuf *buff = pkts[i];
                bytes += rte_pktmbuf_data_len(buff);
              }

              /* no wrap, transmit in one burst */
              rv = rte_vhost_enqueue_burst(&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
                                           &tx_vector[tx_tail],
                                           (uint16_t) (tx_head-tx_tail));
              if (PREDICT_TRUE(rv > 0))
                {
                  dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
                  vring->packets += rv;
                  vring->bytes += bytes;

                  if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
                    vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
                    vring->n_since_last_int += rv;

                    f64 now = vlib_time_now (vm);
                    if (vring->int_deadline < now ||
                        vring->n_since_last_int > dm->vhost_coalesce_frames)
                      dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_RXQ);
                  }

                  int c = rv;
                  while(c--)
                    rte_pktmbuf_free (tx_vector[tx_tail+c]);
                }
            }
          else
            {
              /*
               * If we transmitted everything we wanted, then allow 1 retry
               * so we can try to transmit the rest. If we didn't transmit
               * everything, stop now.
               */
              int i; u32 bytes = 0;
              struct rte_mbuf **pkts = &tx_vector[tx_tail];
              for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++) {
                struct rte_mbuf *buff = pkts[i];
                bytes += rte_pktmbuf_data_len(buff);
              }
              rv = rte_vhost_enqueue_burst(&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
                                           &tx_vector[tx_tail],
                                           (uint16_t) (DPDK_TX_RING_SIZE - tx_tail));

              if (PREDICT_TRUE(rv > 0))
                {
                  dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
                  vring->packets += rv;
                  vring->bytes += bytes;

                  if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
                    vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
                    vring->n_since_last_int += rv;

                    f64 now = vlib_time_now (vm);
                    if (vring->int_deadline < now ||
                        vring->n_since_last_int > dm->vhost_coalesce_frames)
                      dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_RXQ);
                  }

                  int c = rv;
                  while(c--)
                    rte_pktmbuf_free (tx_vector[tx_tail+c]);
                }

              n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
            }

          if (xd->need_txlock)
            *xd->lockp[queue_id] = 0;
        }
      else if (xd->dev_type == VNET_DPDK_DEV_KNI)
        {
          if (PREDICT_TRUE(tx_head > tx_tail))
            {
              /* no wrap, transmit in one burst */
              rv = rte_kni_tx_burst(xd->kni,
                                    &tx_vector[tx_tail],
                                    (uint16_t) (tx_head-tx_tail));
            }
          else
            {
              /*
               * This can only happen if there is a flowcontrol callback.
               * We need to split the transmit into two calls: one for
               * the packets up to the wrap point, and one to continue
               * at the start of the ring.
               * Transmit pkts up to the wrap point.
               */
              rv = rte_kni_tx_burst(xd->kni,
                                    &tx_vector[tx_tail],
                                    (uint16_t) (DPDK_TX_RING_SIZE - tx_tail));

              /*
               * If we transmitted everything we wanted, then allow 1 retry
               * so we can try to transmit the rest. If we didn't transmit
               * everything, stop now.
               */
              n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
            }
        }
      else
        {
          ASSERT(0);
          rv = 0;
        }
      if (PREDICT_FALSE(xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
                        xd->lockp != 0))
          *xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE(rv < 0))
        {
          // emit non-fatal message, bump counter
          vnet_main_t * vnm = dm->vnet_main;
          vnet_interface_main_t * im = &vnm->interface_main;
          u32 node_index;

          node_index = vec_elt_at_index(im->hw_interfaces,
                                        xd->vlib_hw_if_index)->tx_node_index;

          vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
          clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index, rv);
          return n_packets; // untransmitted packets
        }
      ring->tx_tail += (u16)rv;
      n_packets -= (uint16_t) rv;
    } while (rv && n_packets && (n_retry>0));

  return n_packets;
}
/*
 * This function transmits any packets on the interface's tx_vector and returns
 * the number of packets untransmitted on the tx_vector. If the tx_vector is
 * empty the function simply returns 0.
 *
 * It is intended to be called by a traffic manager which has flowed-off an
 * interface to see if the interface can be flowed-on again.
 */
u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  int queue_id;
  struct rte_mbuf ** tx_vector;
  tx_ring_hdr_t *ring;

  /* param is dev_instance and not hw_if_index to save another lookup */
  xd = vec_elt_at_index (dm->devices, dev_instance);

  queue_id = vm->cpu_index;
  tx_vector = xd->tx_vectors[queue_id];

  /* If no packets on the ring, don't bother calling tx function */
  ring = vec_header(tx_vector, sizeof(*ring));
  if (ring->tx_head == ring->tx_tail)
    {
      return 0;
    }

  return tx_burst_vector_internal (vm, xd, tx_vector);
}
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal
 * which calls the dpdk tx_burst function.
 *
 * The tx_vector is treated slightly differently depending on whether or
 * not a flowcontrol callback function has been configured. If there is no
 * callback, the tx_vector is a temporary array of rte_mbuf packet pointers.
 * Its entries are written and consumed before the function exits.
 *
 * If there is a callback then the transmit is being invoked in the presence
 * of a traffic manager. Here the tx_vector is treated like a ring of rte_mbuf
 * pointers. If not all packets can be transmitted, the untransmitted packets
 * stay on the tx_vector until the next call. The callback allows the traffic
 * manager to flow-off dequeues to the interface. The companion function
 * dpdk_interface_tx_vector() allows the traffic manager to detect when
 * it should flow-on the interface again.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 * from;
  struct rte_mbuf ** tx_vector;
  int i;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->cpu_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header(tx_vector, sizeof(*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT(n_packets <= VLIB_FRAME_SIZE);

  if (PREDICT_FALSE(n_on_ring + n_packets > DPDK_TX_RING_SIZE))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
                        n_packets);

      while (n_packets--)
        {
          u32 bi0 = from[n_packets];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer(b0);
          rte_pktmbuf_free (mb0);
        }
      return n_on_ring;
    }
  if (PREDICT_FALSE(dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
        {
          u32 bi0 = from[0];
          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          if (dm->pcap_sw_if_index == 0 ||
              dm->pcap_sw_if_index == vnet_buffer(b0)->sw_if_index [VLIB_TX])
            pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
          from++;
          n_left--;
        }
    }
  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % DPDK_TX_RING_SIZE;
  while (n_left >= 4)
    {
      u32 bi0, bi1;
      u32 pi0, pi1;
      struct rte_mbuf * mb0, * mb1;
      struct rte_mbuf * prefmb0, * prefmb1;
      vlib_buffer_t * b0, * b1;
      vlib_buffer_t * pref0, * pref1;
      i16 delta0, delta1;
      u16 new_data_len0, new_data_len1;
      u16 new_pkt_len0, new_pkt_len1;
      u32 any_clone;

      pi0 = from[2];
      pi1 = from[3];
      pref0 = vlib_get_buffer (vm, pi0);
      pref1 = vlib_get_buffer (vm, pi1);

      prefmb0 = rte_mbuf_from_vlib_buffer(pref0);
      prefmb1 = rte_mbuf_from_vlib_buffer(pref1);

      CLIB_PREFETCH(prefmb0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH(pref0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH(prefmb1, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH(pref1, CLIB_CACHE_LINE_BYTES, LOAD);

      bi0 = from[0];
      bi1 = from[1];
      from += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      mb0 = rte_mbuf_from_vlib_buffer(b0);
      mb1 = rte_mbuf_from_vlib_buffer(b1);

      any_clone = b0->clone_count | b1->clone_count;
      if (PREDICT_FALSE(any_clone != 0))
        {
          if (PREDICT_FALSE(b0->clone_count != 0))
            {
              struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
              if (PREDICT_FALSE(mb0_new == 0))
                {
                  vlib_error_count (vm, node->node_index,
                                    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
                  b0->flags |= VLIB_BUFFER_REPL_FAIL;
                }
              else
                mb0 = mb0_new;
              vec_add1 (dm->recycle[my_cpu], bi0);
            }
          if (PREDICT_FALSE(b1->clone_count != 0))
            {
              struct rte_mbuf * mb1_new = dpdk_replicate_packet_mb (b1);
              if (PREDICT_FALSE(mb1_new == 0))
                {
                  vlib_error_count (vm, node->node_index,
                                    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
                  b1->flags |= VLIB_BUFFER_REPL_FAIL;
                }
              else
                mb1 = mb1_new;
              vec_add1 (dm->recycle[my_cpu], bi1);
            }
        }

      delta0 = PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
        vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
      delta1 = PREDICT_FALSE(b1->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
        vlib_buffer_length_in_chain (vm, b1) - (i16) mb1->pkt_len;

      new_data_len0 = (u16)((i16) mb0->data_len + delta0);
      new_data_len1 = (u16)((i16) mb1->data_len + delta1);
      new_pkt_len0 = (u16)((i16) mb0->pkt_len + delta0);
      new_pkt_len1 = (u16)((i16) mb1->pkt_len + delta1);

      b0->current_length = new_data_len0;
      b1->current_length = new_data_len1;
      mb0->data_len = new_data_len0;
      mb1->data_len = new_data_len1;
      mb0->pkt_len = new_pkt_len0;
      mb1->pkt_len = new_pkt_len1;

      mb0->data_off = (PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
          mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);
      mb1->data_off = (PREDICT_FALSE(b1->flags & VLIB_BUFFER_REPL_FAIL)) ?
          mb1->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b1->current_data);

      if (PREDICT_FALSE(node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b0->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
          if (b1->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
        }

      if (PREDICT_TRUE(any_clone == 0))
        {
          tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
          i++;
          tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
          i++;
        }
      else
        {
          /* cloning was done, need to check for failure */
          if (PREDICT_TRUE((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            {
              tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
              i++;
            }
          if (PREDICT_TRUE((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            {
              tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
              i++;
            }
        }

      n_left -= 2;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf * mb0;
      vlib_buffer_t * b0;
      i16 delta0;
      u16 new_data_len0;
      u16 new_pkt_len0;

      bi0 = from[0];
      from++;

      b0 = vlib_get_buffer (vm, bi0);

      mb0 = rte_mbuf_from_vlib_buffer(b0);
      if (PREDICT_FALSE(b0->clone_count != 0))
        {
          struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
          if (PREDICT_FALSE(mb0_new == 0))
            {
              vlib_error_count (vm, node->node_index,
                                DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
              b0->flags |= VLIB_BUFFER_REPL_FAIL;
            }
          else
            mb0 = mb0_new;
          vec_add1 (dm->recycle[my_cpu], bi0);
        }

      delta0 = PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
        vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;

      new_data_len0 = (u16)((i16) mb0->data_len + delta0);
      new_pkt_len0 = (u16)((i16) mb0->pkt_len + delta0);

      b0->current_length = new_data_len0;
      mb0->data_len = new_data_len0;
      mb0->pkt_len = new_pkt_len0;
      mb0->data_off = (PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
          mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);

      if (PREDICT_FALSE(node->flags & VLIB_NODE_FLAG_TRACE))
        if (b0->flags & VLIB_BUFFER_IS_TRACED)
          dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
        {
          tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
          i++;
        }
      n_left--;
    }
  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted.
   * This is the number originally on ring minus the number remaining on ring.
   */
  tx_pkts = n_on_ring - n_packets;
  if (PREDICT_FALSE(dm->flowcontrol_callback != 0))
    {
      if (PREDICT_FALSE(n_packets))
        {
          /* Callback may want to enable flowcontrol */
          dm->flowcontrol_callback(vm, xd->vlib_hw_if_index, ring->tx_head - ring->tx_tail);
        }
      else
        {
          /* Reset head/tail to avoid unnecessary wrap */
          ring->tx_head = 0;
          ring->tx_tail = 0;
        }
    }
  else
    {
      /* If there is no callback then drop any non-transmitted packets */
      if (PREDICT_FALSE(n_packets))
        {
          vlib_simple_counter_main_t * cm;
          vnet_main_t * vnm = vnet_get_main();

          cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                                 VNET_INTERFACE_COUNTER_TX_ERROR);

          vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, n_packets);

          vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
                            n_packets);

          while (n_packets--)
            rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
        }

      /* Reset head/tail to avoid unnecessary wrap */
      ring->tx_head = 0;
      ring->tx_tail = 0;
    }

  /* Recycle replicated buffers */
  if (PREDICT_FALSE(vec_len(dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu], vec_len(dm->recycle[my_cpu]));
      _vec_len(dm->recycle[my_cpu]) = 0;
    }
  ASSERT(ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}
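
/*
 * Renumber a vhost-user interface by updating its user-assigned instance
 * id (vu_if_id). Other device types do not carry such an id and are
 * rejected with a warning.
 */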
static int dpdk_device_renumber (vnet_hw_interface_t * hi,
                                 u32 new_dev_instance)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!xd || xd->dev_type != VNET_DPDK_DEV_VHOST_USER) {
    clib_warning("cannot renumber non-vhost-user interface (sw_if_index: %d)",
                 hi->sw_if_index);
    return 0;
  }

  xd->vu_if_id = new_dev_instance;
  return 0;
}
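
/*
 * Clear a device's counters. When the device is not in the faked-down
 * state (admin_up != 0xff, see the VMXNET3 workaround below), the current
 * stats are snapshotted into last_cleared_stats/last_cleared_xstats so
 * displays appear cleared; otherwise the PMD counters are reset directly.
 */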
static void dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, instance);

  /*
   * DAW-FIXME: VMXNET3 device stop/start doesn't work,
   * therefore fake the stop in the dpdk driver by
   * silently dropping all of the incoming pkts instead of
   * stopping the driver / hardware.
   */
  if (xd->admin_up != 0xff)
    {
      /*
       * Set the "last_cleared_stats" to the current stats, so that
       * things appear to clear from a display perspective.
       */
      dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

      clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof(xd->stats));
      clib_memcpy (xd->last_cleared_xstats, xd->xstats,
                   vec_len(xd->last_cleared_xstats) *
                   sizeof(xd->last_cleared_xstats[0]));
    }
  else
    {
      /*
       * Internally rte_eth_xstats_reset() is calling rte_eth_stats_reset(),
       * so we're only calling xstats_reset() here.
       */
      rte_eth_xstats_reset (xd->device_index);
      memset (&xd->stats, 0, sizeof(xd->stats));
      memset (&xd->last_stats, 0, sizeof (xd->last_stats));
    }

  if (PREDICT_FALSE(xd->dev_type == VNET_DPDK_DEV_VHOST_USER)) {
    int i;
    for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) {
      xd->vu_intf->vrings[i].packets = 0;
      xd->vu_intf->vrings[i].bytes = 0;
    }
  }
}
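
/*
 * Callbacks passed to rte_kni_alloc(); the kernel side of a KNI device
 * calls these to bring the link up or down and to change the MTU.
 */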
#ifdef RTE_LIBRTE_KNI
static int
kni_config_network_if(u8 port_id, u8 if_up)
{
  vnet_main_t * vnm = vnet_get_main();
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword *p;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0) {
    clib_warning("unknown interface");
    return 0;
  } else {
    xd = vec_elt_at_index (dm->devices, p[0]);
  }

  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
                               if_up ? VNET_HW_INTERFACE_FLAG_LINK_UP |
                               ETH_LINK_FULL_DUPLEX : 0);
  return 0;
}
static int
kni_change_mtu(u8 port_id, unsigned new_mtu)
{
  vnet_main_t * vnm = vnet_get_main();
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword *p;
  vnet_hw_interface_t * hif;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0) {
    clib_warning("unknown interface");
    return 0;
  } else {
    xd = vec_elt_at_index (dm->devices, p[0]);
  }

  hif = vnet_get_hw_interface (vnm, xd->vlib_hw_if_index);

  hif->max_packet_bytes = new_mtu;

  return 0;
}
#endif
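
/*
 * Admin up/down handler. KNI devices are allocated/released on the fly,
 * vhost-user devices only toggle link flags, and physical devices are
 * started and stopped via rte_eth_dev_start()/rte_eth_dev_stop() (modulo
 * the VMXNET3 stop/start workaround noted below).
 */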
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

#ifdef RTE_LIBRTE_KNI
  if (xd->dev_type == VNET_DPDK_DEV_KNI)
    {
      if (is_up)
        {
          struct rte_kni_conf conf;
          struct rte_kni_ops ops;
          vlib_main_t * vm = vlib_get_main();
          vlib_buffer_main_t * bm = vm->buffer_main;
          memset(&conf, 0, sizeof(conf));
          snprintf(conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id);
          conf.mbuf_size = VLIB_BUFFER_DATA_SIZE;
          memset(&ops, 0, sizeof(ops));
          ops.port_id = xd->kni_port_id;
          ops.change_mtu = kni_change_mtu;
          ops.config_network_if = kni_config_network_if;

          xd->kni = rte_kni_alloc(bm->pktmbuf_pools[rte_socket_id()], &conf, &ops);
          if (!xd->kni)
            {
              clib_warning("failed to allocate kni interface");
            }
          else
            {
              hif->max_packet_bytes = 1500; /* kni interface default value */
              xd->admin_up = 1;
            }
        }
      else
        {
          xd->admin_up = 0;
          rte_kni_release(xd->kni);
        }
      return 0;
    }
#endif
  if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
    {
      if (is_up)
        {
          if (xd->vu_is_running)
            vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
                                         VNET_HW_INTERFACE_FLAG_LINK_UP |
                                         ETH_LINK_FULL_DUPLEX );
          xd->admin_up = 1;
        }
      else
        {
          vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
          xd->admin_up = 0;
        }

      return 0;
    }
  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->admin_up == 0)
        rv = rte_eth_dev_start (xd->device_index);

      if (xd->promisc)
        rte_eth_promiscuous_enable(xd->device_index);
      else
        rte_eth_promiscuous_disable(xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->admin_up = 1;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
        xd->admin_up = 0;
      else
        xd->admin_up = 0xff; /* faked-down marker, checked elsewhere */

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);

      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
        rte_eth_dev_stop (xd->device_index);
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop",
                  rv);

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node.
 */
static void dpdk_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
                                          u32 node_index)
{
  dpdk_main_t * xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t * xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
                             u32 hw_if_index,
                             struct vnet_sw_interface_t * st,
                             int is_add)
{
  dpdk_main_t * xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t * xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t * t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;

  if (xd->dev_type != VNET_DPDK_DEV_ETH)
    return 0;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) &&
      (xd->pmd != VNET_DPDK_PMD_I40EVF))
    return 0;

  if (t->sub.eth.flags.no_tags == 1)
    return 0;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1 ))
    return clib_error_return (0, "unsupported VLAN setup");

  vlan_offload = rte_eth_dev_get_vlan_offload(xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload(xd->device_index, vlan_offload)))
    return clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
                              xd->device_index, r);

  if ((r = rte_eth_dev_vlan_filter(xd->device_index, t->sub.eth.outer_vlan_id, is_add)))
    return clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
                              xd->device_index, r);

  return 0;
}
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .no_flatten_output_chains = 1,
  .name_renumber = dpdk_device_renumber,
};
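
/*
 * Register the (single, global) flowcontrol callback which
 * tx_burst_vector_internal() consults when a burst cannot drain the ring.
 * Typical use by a traffic manager (the callback name below is
 * illustrative only):
 *
 *   dpdk_set_flowcontrol_callback (vm, my_tm_flowoff_callback);
 */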
void dpdk_set_flowcontrol_callback (vlib_main_t *vm,
                                    dpdk_flowcontrol_callback_t callback)
{
  dpdk_main.flowcontrol_callback = callback;
}
#define UP_DOWN_FLAG_EVENT 1

u32 dpdk_get_admin_up_down_in_progress (void)
{
  return dpdk_main.admin_up_down_in_progress;
}
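
/*
 * Process node which applies interface flag changes posted via
 * post_sw_interface_set_flags(), keeping potentially slow device
 * start/stop calls out of the posting context.
 */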
static uword
admin_up_down_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt,
                       vlib_frame_t * f)
{
  clib_error_t * error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type) {
        case UP_DOWN_FLAG_EVENT:
          if (vec_len(event_data) == 2) {
            sw_if_index = event_data[0];
            flags = event_data[1];
            error = vnet_sw_interface_set_flags (vnet_get_main(), sw_if_index, flags);
            clib_error_report(error);
          }
          break;
      }

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0; /* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
    .function = admin_up_down_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "admin-up-down-process",
    .process_log2_n_stack_bytes = 17,  /* 128KB */
};
/*
 * Asynchronously invoke vnet_sw_interface_set_flags via the admin_up_down
 * process. Useful for avoiding long blocking delays (>150ms) in the dpdk
 * driver.
 * WARNING: when posting this event, no other interface-related calls should
 * be made (e.g. vnet_create_sw_interface()) while the event is being
 * processed (admin_up_down_in_progress). This is required in order to avoid
 * race conditions in manipulating interface data structures.
 */
void post_sw_interface_set_flags (vlib_main_t *vm, u32 sw_if_index, u32 flags)
{
  uword * d = vlib_process_signal_event_data
      (vm, admin_up_down_process_node.index,
       UP_DOWN_FLAG_EVENT, 2, sizeof(u32));
  d[0] = sw_if_index;
  d[1] = flags;
}
/*
 * Called by the dpdk driver's rte_delay_us() function.
 * Return 0 to have the dpdk do a regular delay loop.
 * Return 1 to skip the delay loop because we are suspending
 * the calling vlib process instead.
 */
int rte_delay_us_override (unsigned us) {
  vlib_main_t * vm;

  /* Don't bother intercepting for short delays */
  if (us < 10) return 0;

  /*
   * Only intercept if we are in a vlib process.
   * If we are called from a vlib worker thread or the vlib main
   * thread then do not intercept. (Must not be called from an
   * independent pthread).
   */
  if (os_get_cpu_number() == 0)
    {
      /*
       * We're in the vlib main thread or a vlib process. Make sure
       * the process is running and we're not still initializing.
       */
      vm = vlib_get_main();
      if (vlib_in_process_context(vm))
        {
          /* Only suspend for the admin_down_process */
          vlib_process_t * proc = vlib_get_current_process(vm);
          if (!(proc->flags & VLIB_PROCESS_IS_RUNNING) ||
              (proc->node_runtime.function != admin_up_down_process))
            return 0;

          f64 delay = 1e-6 * us;
          vlib_process_suspend(vm, delay);
          return 1;
        }
    }
  return 0; // no override
}
/*
 * Return a copy of the DPDK port stats in dest.
 */
clib_error_t*
dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats* dest)
{
  dpdk_main_t * dm = &dpdk_main;
  vnet_main_t * vnm = vnet_get_main();
  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!dest)
    return clib_error_return (0, "Missing or NULL argument");
  if (!xd)
    return clib_error_return (0, "Unable to get DPDK device from HW interface");

  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy(dest, &xd->stats, sizeof(xd->stats));
  return (0);
}
/*
 * Return the number of dpdk mbufs
 */
u32 dpdk_num_mbufs (void)
{
  dpdk_main_t * dm = &dpdk_main;

  return dm->num_mbufs;
}

/*
 * Return the io_thread_release flag
 */
int dpdk_io_thread_release (void)
{
  dpdk_main_t * dm = &dpdk_main;

  return dm->io_thread_release;
}
/*
 * Return the pmd type for a given hardware interface
 */
dpdk_pmd_t dpdk_get_pmd_type (vnet_hw_interface_t *hi)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;

  ASSERT (hi);

  xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  ASSERT (xd);

  return xd->pmd;
}

/*
 * Return the cpu socket for a given hardware interface
 */
i8 dpdk_get_cpu_socket (vnet_hw_interface_t *hi)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;

  ASSERT (hi);

  xd = vec_elt_at_index(dm->devices, hi->dev_instance);

  ASSERT (xd);

  return xd->cpu_socket;
}