/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>
#include <assert.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>

#include "dpdk_priv.h"
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
  DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
					    (struct ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  return 0;
}
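
/*
 * Minimal usage sketch (hypothetical caller; "hi" and the six-byte "mac"
 * buffer are assumptions, not defined in this file):
 *
 *   u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *   clib_error_t *err = dpdk_set_mac_address (hi, (char *) mac);
 *   if (err)
 *     clib_error_report (err);
 */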
clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
		    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);

  if (error)
    return clib_error_return (0, "mc addr list failed: %d", error);

  return 0;
}
static struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_main_t *bm = vm->buffer_main;
  struct rte_mbuf *first_mb = 0, *new_mb, *pkt_mb, **prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer (b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE (pkt_mb == 0))
	{
	  clib_warning ("Missing %d mbuf chain segment(s): "
			"(nb_segs = %d, nb_segs_left = %d)!",
			nb_segs - nb_segs_left, nb_segs, nb_segs_left);
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
	{
	  if (first_mb)
	    rte_pktmbuf_free (first_mb);
	  return NULL;
	}

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
	{
	  first_mb = new_mb;
	  rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
	  first_mb->nb_segs = pkt_mb->nb_segs;
	  first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
	  first_mb->vlan_macip = pkt_mb->vlan_macip;
	  first_mb->hash = pkt_mb->hash;
	  first_mb->ol_flags = pkt_mb->ol_flags;
#endif
	}
      else
	{
	  ASSERT (prev_mb_next != 0);
	  *prev_mb_next = new_mb;
	}

      /*
       * Copy packet segment data into new mbuf segment.
       */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;
      copy_bytes = pkt_mb->data_len + RTE_PKTMBUF_HEADROOM;
      ASSERT (copy_bytes <= pkt_mb->buf_len);
      clib_memcpy (new_mb->buf_addr, pkt_mb->buf_addr, copy_bytes);

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  ASSERT (pkt_mb == 0);
  __rte_mbuf_sanity_check (first_mb, 1);

  return first_mb;
}
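
/*
 * dpdk_replicate_packet_mb() returns a deep copy of an mbuf chain, or NULL
 * on allocation / chain failure. A sketch of the expected caller pattern
 * (this mirrors the clone handling in dpdk_interface_tx() below):
 *
 *   struct rte_mbuf *copy = dpdk_replicate_packet_mb (b0);
 *   if (PREDICT_FALSE (copy == 0))
 *     b0->flags |= VLIB_BUFFER_REPL_FAIL;   // count + drop later
 */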
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
		      vlib_node_runtime_t * node,
		      dpdk_device_t * xd,
		      u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_dma_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer,
	       sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
	       sizeof (t0->buffer.pre_data));
}
/*
 * This function calls the dpdk tx_burst function to transmit the packets
 * on the tx_vector. It manages a per-device lock if the device does not
 * support multiple queues. It returns the number of packets left
 * untransmitted on the tx_vector. If all packets are transmitted (the
 * normal case), the function returns 0.
 *
 * The tx_burst function may not be able to transmit all packets because the
 * dpdk ring is full. If a flowcontrol callback function has been configured,
 * the function simply returns. If no callback has been configured, the
 * function retries calling tx_burst with the remaining packets. This
 * continues until all packets are transmitted or tx_burst indicates no
 * packets could be transmitted. (The caller can drop the remaining packets.)
 *
 * The function assumes there is at least one packet on the tx_vector.
 */
static_always_inline
u32 tx_burst_vector_internal (vlib_main_t * vm,
			      dpdk_device_t * xd,
			      struct rte_mbuf ** tx_vector)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header (tx_vector, sizeof (*ring));

  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % DPDK_TX_RING_SIZE;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT (n_packets > 0);

  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest
   * un-flowed-off burst from a traffic manager. A larger size also helps
   * performance a bit because it decreases the probability of having to
   * issue two tx_burst calls due to a ring wrap.
   */
  ASSERT (n_packets < DPDK_TX_RING_SIZE);

  /*
   * If there is no flowcontrol callback, there is only temporary buffering
   * on the tx_vector and so the tail should always be 0.
   */
  ASSERT (dm->flowcontrol_callback || ring->tx_tail == 0);

  /*
   * If there is a flowcontrol callback, don't retry any incomplete
   * tx_bursts. Apply backpressure instead. If there is no callback, keep
   * retrying until a tx_burst sends no packets. An n_retry of 255
   * essentially means there is no retry limit.
   */
  n_retry = dm->flowcontrol_callback ? 0 : 255;
  queue_id = vm->cpu_index;

  do
    {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;

      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
			 xd->lockp != 0))
	{
	  queue_id = queue_id % xd->tx_q_used;
	  while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
	    /* zzzz */
	    queue_id = (queue_id + 1) % xd->tx_q_used;
	}

      if (PREDICT_TRUE (xd->dev_type == VNET_DPDK_DEV_ETH))
	{
	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      /* no wrap, transmit in one burst */
	      rv = rte_eth_tx_burst (xd->device_index,
				     (uint16_t) queue_id,
				     &tx_vector[tx_tail],
				     (uint16_t) (tx_head - tx_tail));
	    }
	  else
	    {
	      /*
	       * This can only happen if there is a flowcontrol callback.
	       * We need to split the transmit into two calls: one for
	       * the packets up to the wrap point, and one to continue
	       * at the start of the ring.
	       * Transmit pkts up to the wrap point.
	       */
	      rv = rte_eth_tx_burst (xd->device_index,
				     (uint16_t) queue_id,
				     &tx_vector[tx_tail],
				     (uint16_t) (DPDK_TX_RING_SIZE - tx_tail));

	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }
	}
      else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
	{
	  u32 offset = 0;
	  if (xd->need_txlock)
	    {
	      queue_id = 0;
	      while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
	    }
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
	  else
	    {
	      dpdk_device_and_queue_t *dq;
	      vec_foreach (dq, dm->devices_by_cpu[vm->cpu_index])
	      {
		if (xd->device_index == dq->device)
		  break;
	      }
	      assert (dq);
	      offset = dq->queue_id * VIRTIO_QNUM;
	    }
#endif
	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      int i;
	      u32 bytes = 0;
	      struct rte_mbuf **pkts = &tx_vector[tx_tail];
	      for (i = 0; i < (tx_head - tx_tail); i++)
		{
		  struct rte_mbuf *buff = pkts[i];
		  bytes += rte_pktmbuf_data_len (buff);
		}

	      /* no wrap, transmit in one burst */
	      rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev,
					    offset + VIRTIO_RXQ,
					    &tx_vector[tx_tail],
					    (uint16_t) (tx_head - tx_tail));
	      if (PREDICT_TRUE (rv > 0))
		{
		  dpdk_vu_vring *vring =
		    &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
		  vring->packets += rv;
		  vring->bytes += bytes;

		  if (dpdk_vhost_user_want_interrupt (xd, offset + VIRTIO_RXQ))
		    {
		      vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
		      vring->n_since_last_int += rv;

		      f64 now = vlib_time_now (vm);
		      if (vring->int_deadline < now ||
			  vring->n_since_last_int > dm->vhost_coalesce_frames)
			dpdk_vhost_user_send_interrupt (vm, xd,
							offset + VIRTIO_RXQ);
		    }

		  int c = rv;
		  while (c--)
		    rte_pktmbuf_free (tx_vector[tx_tail + c]);
		}
	    }
	  else
	    {
	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      int i;
	      u32 bytes = 0;
	      struct rte_mbuf **pkts = &tx_vector[tx_tail];
	      for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++)
		{
		  struct rte_mbuf *buff = pkts[i];
		  bytes += rte_pktmbuf_data_len (buff);
		}
	      rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev,
					    offset + VIRTIO_RXQ,
					    &tx_vector[tx_tail],
					    (uint16_t) (DPDK_TX_RING_SIZE -
							tx_tail));

	      if (PREDICT_TRUE (rv > 0))
		{
		  dpdk_vu_vring *vring =
		    &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
		  vring->packets += rv;
		  vring->bytes += bytes;

		  if (dpdk_vhost_user_want_interrupt (xd, offset + VIRTIO_RXQ))
		    {
		      vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
		      vring->n_since_last_int += rv;

		      f64 now = vlib_time_now (vm);
		      if (vring->int_deadline < now ||
			  vring->n_since_last_int > dm->vhost_coalesce_frames)
			dpdk_vhost_user_send_interrupt (vm, xd,
							offset + VIRTIO_RXQ);
		    }

		  int c = rv;
		  while (c--)
		    rte_pktmbuf_free (tx_vector[tx_tail + c]);
		}

	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }

	  if (xd->need_txlock)
	    *xd->lockp[queue_id] = 0;
	}
#if RTE_LIBRTE_KNI
      else if (xd->dev_type == VNET_DPDK_DEV_KNI)
	{
	  if (PREDICT_TRUE (tx_head > tx_tail))
	    {
	      /* no wrap, transmit in one burst */
	      rv = rte_kni_tx_burst (xd->kni,
				     &tx_vector[tx_tail],
				     (uint16_t) (tx_head - tx_tail));
	    }
	  else
	    {
	      /*
	       * This can only happen if there is a flowcontrol callback.
	       * We need to split the transmit into two calls: one for
	       * the packets up to the wrap point, and one to continue
	       * at the start of the ring.
	       * Transmit pkts up to the wrap point.
	       */
	      rv = rte_kni_tx_burst (xd->kni,
				     &tx_vector[tx_tail],
				     (uint16_t) (DPDK_TX_RING_SIZE - tx_tail));

	      /*
	       * If we transmitted everything we wanted, then allow 1 retry
	       * so we can try to transmit the rest. If we didn't transmit
	       * everything, stop now.
	       */
	      n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
	    }
	}
#endif
      else
	{
	  ASSERT (0);
	  rv = 0;
	}
      if (PREDICT_FALSE (xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
			 xd->lockp != 0))
	*xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE (rv < 0))
	{
	  // emit non-fatal message, bump counter
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->vlib_hw_if_index)->tx_node_index;

	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  clib_warning ("rte_eth_tx_burst[%d]: error %d",
			xd->device_index, rv);
	  return n_packets;	// untransmitted packets
	}
      ring->tx_tail += (u16) rv;
      n_packets -= (uint16_t) rv;
    }
  while (rv && n_packets && (n_retry > 0));

  return n_packets;
}
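
/*
 * Worked example of the ring arithmetic above, assuming (hypothetically)
 * DPDK_TX_RING_SIZE == 4096: with ring->tx_tail == 4094 and
 * ring->tx_head == 4102, n_packets is 8, but tx_head % 4096 == 6 is less
 * than tx_tail % 4096 == 4094, so the burst wraps. The first tx_burst call
 * covers the 2 slots up to the wrap point; only if both are accepted does
 * n_retry allow one more call for the remaining 6 at the start of the ring.
 */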
/*
 * This function transmits any packets on the interface's tx_vector and
 * returns the number of packets untransmitted on the tx_vector. If the
 * tx_vector is empty the function simply returns 0.
 *
 * It is intended to be called by a traffic manager which has flowed-off an
 * interface to see if the interface can be flowed-on again.
 */
u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  int queue_id;
  struct rte_mbuf **tx_vector;
  tx_ring_hdr_t *ring;

  /* param is dev_instance and not hw_if_index to save another lookup */
  xd = vec_elt_at_index (dm->devices, dev_instance);

  queue_id = vm->cpu_index;
  tx_vector = xd->tx_vectors[queue_id];

  /* If no packets on the ring, don't bother calling tx function */
  ring = vec_header (tx_vector, sizeof (*ring));
  if (ring->tx_head == ring->tx_tail)
    return 0;

  return tx_burst_vector_internal (vm, xd, tx_vector);
}
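
/*
 * Flow-on polling sketch (hypothetical traffic-manager caller; dev_instance
 * would be remembered from the flowed-off interface):
 *
 *   if (dpdk_interface_tx_vector (vm, dev_instance) == 0)
 *     ; // ring fully drained: safe to flow the interface on again
 */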
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal
 * which calls the dpdk tx_burst function.
 *
 * The tx_vector is treated slightly differently depending on whether or
 * not a flowcontrol callback function has been configured. If there is no
 * callback, the tx_vector is a temporary array of rte_mbuf packet pointers.
 * Its entries are written and consumed before the function exits.
 *
 * If there is a callback then the transmit is being invoked in the presence
 * of a traffic manager. Here the tx_vector is treated like a ring of rte_mbuf
 * pointers. If not all packets can be transmitted, the untransmitted packets
 * stay on the tx_vector until the next call. The callback allows the traffic
 * manager to flow-off dequeues to the interface. The companion function
 * dpdk_interface_tx_vector() allows the traffic manager to detect when
 * it should flow-on the interface again.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  struct rte_mbuf **tx_vector;
  int i;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->cpu_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header (tx_vector, sizeof (*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  if (PREDICT_FALSE (n_on_ring + n_packets > DPDK_TX_RING_SIZE))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
			n_packets);

      while (n_packets--)
	{
	  u32 bi0 = from[n_packets];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
	  rte_pktmbuf_free (mb0);
	}
      return n_on_ring;
    }
  if (PREDICT_FALSE (dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
	{
	  u32 bi0 = from[0];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  if (dm->pcap_sw_if_index == 0 ||
	      dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
	  from++;
	  n_left--;
	}
    }

  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % DPDK_TX_RING_SIZE;
  while (n_left >= 4)
    {
      u32 bi0, bi1;
      u32 pi0, pi1;
      struct rte_mbuf *mb0, *mb1;
      struct rte_mbuf *prefmb0, *prefmb1;
      vlib_buffer_t *b0, *b1;
      vlib_buffer_t *pref0, *pref1;
      i16 delta0, delta1;
      u16 new_data_len0, new_data_len1;
      u16 new_pkt_len0, new_pkt_len1;
      u32 any_clone;

      pi0 = from[2];
      pi1 = from[3];
      pref0 = vlib_get_buffer (vm, pi0);
      pref1 = vlib_get_buffer (vm, pi1);

      prefmb0 = rte_mbuf_from_vlib_buffer (pref0);
      prefmb1 = rte_mbuf_from_vlib_buffer (pref1);

      CLIB_PREFETCH (prefmb0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (pref0, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (prefmb1, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (pref1, CLIB_CACHE_LINE_BYTES, LOAD);

      bi0 = from[0];
      bi1 = from[1];
      from += 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);
      any_clone = b0->clone_count | b1->clone_count;
      if (PREDICT_FALSE (any_clone != 0))
	{
	  if (PREDICT_FALSE (b0->clone_count != 0))
	    {
	      struct rte_mbuf *mb0_new = dpdk_replicate_packet_mb (b0);
	      if (PREDICT_FALSE (mb0_new == 0))
		{
		  vlib_error_count (vm, node->node_index,
				    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
		  b0->flags |= VLIB_BUFFER_REPL_FAIL;
		}
	      else
		mb0 = mb0_new;
	      vec_add1 (dm->recycle[my_cpu], bi0);
	    }
	  if (PREDICT_FALSE (b1->clone_count != 0))
	    {
	      struct rte_mbuf *mb1_new = dpdk_replicate_packet_mb (b1);
	      if (PREDICT_FALSE (mb1_new == 0))
		{
		  vlib_error_count (vm, node->node_index,
				    DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
		  b1->flags |= VLIB_BUFFER_REPL_FAIL;
		}
	      else
		mb1 = mb1_new;
	      vec_add1 (dm->recycle[my_cpu], bi1);
	    }
	}
      delta0 = PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
      delta1 = PREDICT_FALSE (b1->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b1) - (i16) mb1->pkt_len;

      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_data_len1 = (u16) ((i16) mb1->data_len + delta1);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);
      new_pkt_len1 = (u16) ((i16) mb1->pkt_len + delta1);

      b0->current_length = new_data_len0;
      b1->current_length = new_data_len1;
      mb0->data_len = new_data_len0;
      mb1->data_len = new_data_len1;
      mb0->pkt_len = new_pkt_len0;
      mb1->pkt_len = new_pkt_len1;

      mb0->data_off = (PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb0->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b0->current_data);
      mb1->data_off = (PREDICT_FALSE (b1->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb1->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b1->current_data);
      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
	  if (b1->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
	}

      if (PREDICT_TRUE (any_clone == 0))
	{
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	  i++;
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
	  i++;
	}
      else
	{
	  /* cloning was done, need to check for failure */
	  if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    {
	      tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	      i++;
	    }
	  if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	    {
	      tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
	      i++;
	    }
	}

      n_left -= 2;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf *mb0;
      vlib_buffer_t *b0;
      i16 delta0;
      u16 new_data_len0;
      u16 new_pkt_len0;

      bi0 = from[0];
      from++;

      b0 = vlib_get_buffer (vm, bi0);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      if (PREDICT_FALSE (b0->clone_count != 0))
	{
	  struct rte_mbuf *mb0_new = dpdk_replicate_packet_mb (b0);
	  if (PREDICT_FALSE (mb0_new == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
	      b0->flags |= VLIB_BUFFER_REPL_FAIL;
	    }
	  else
	    mb0 = mb0_new;
	  vec_add1 (dm->recycle[my_cpu], bi0);
	}

      delta0 = PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
	vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;

      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);

      b0->current_length = new_data_len0;
      mb0->data_len = new_data_len0;
      mb0->pkt_len = new_pkt_len0;
      mb0->data_off = (PREDICT_FALSE (b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
	mb0->data_off : (u16) (RTE_PKTMBUF_HEADROOM + b0->current_data);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b0->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
	{
	  tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
	  i++;
	}
      n_left--;
    }
  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted: the number
   * originally on the ring minus the number remaining on the ring.
   */
  tx_pkts = n_on_ring - n_packets;

  if (PREDICT_FALSE (dm->flowcontrol_callback != 0))
    {
      if (PREDICT_FALSE (n_packets))
	{
	  /* Callback may want to enable flowcontrol */
	  dm->flowcontrol_callback (vm, xd->vlib_hw_if_index,
				    ring->tx_head - ring->tx_tail);
	}
      else
	{
	  /* Reset head/tail to avoid unnecessary wrap */
	  ring->tx_head = 0;
	  ring->tx_tail = 0;
	}
    }
  else
    {
      /* If there is no callback then drop any non-transmitted packets */
      if (PREDICT_FALSE (n_packets))
	{
	  vlib_simple_counter_main_t *cm;
	  vnet_main_t *vnm = vnet_get_main ();

	  cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
				 VNET_INTERFACE_COUNTER_TX_ERROR);

	  vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
					 n_packets);

	  vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			    n_packets);

	  while (n_packets--)
	    rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
	}

      /* Reset head/tail to avoid unnecessary wrap */
      ring->tx_head = 0;
      ring->tx_tail = 0;
    }

  /* Recycle replicated buffers */
  if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu],
			vec_len (dm->recycle[my_cpu]));
      _vec_len (dm->recycle[my_cpu]) = 0;
    }

  ASSERT (ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}
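
/*
 * Worked example of the accounting above: if a frame adds 256 packets to an
 * empty ring and tx_burst_vector_internal() leaves 6 untransmitted, then
 * tx_pkts = 256 - 6 = 250. Without a flowcontrol callback the 6 stragglers
 * are freed and counted against VNET_INTERFACE_COUNTER_TX_ERROR; with a
 * callback they stay on the ring for a later flow-on retry.
 */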
static int dpdk_device_renumber (vnet_hw_interface_t * hi,
				 u32 new_dev_instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!xd || xd->dev_type != VNET_DPDK_DEV_VHOST_USER)
    {
      clib_warning
	("cannot renumber non-vhost-user interface (sw_if_index: %d)",
	 hi->sw_if_index);
      return 0;
    }

  xd->vu_if_id = new_dev_instance;
  return 0;
}
static void dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * DAW-FIXME: VMXNET3 device stop/start doesn't work,
   * therefore fake the stop in the dpdk driver by
   * silently dropping all of the incoming pkts instead of
   * stopping the driver / hardware.
   */
  if (xd->admin_up != 0xff)
    {
      /*
       * Set the "last_cleared_stats" to the current stats, so that
       * things appear to clear from a display perspective.
       */
      dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

      clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
      clib_memcpy (xd->last_cleared_xstats, xd->xstats,
		   vec_len (xd->last_cleared_xstats) *
		   sizeof (xd->last_cleared_xstats[0]));
    }
  else
    {
      /*
       * Internally rte_eth_xstats_reset() is calling rte_eth_stats_reset(),
       * so we're only calling xstats_reset() here.
       */
      rte_eth_xstats_reset (xd->device_index);
      memset (&xd->stats, 0, sizeof (xd->stats));
      memset (&xd->last_stats, 0, sizeof (xd->last_stats));
    }

  if (PREDICT_FALSE (xd->dev_type == VNET_DPDK_DEV_VHOST_USER))
    {
      int i;
      for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++)
	{
	  xd->vu_intf->vrings[i].packets = 0;
	  xd->vu_intf->vrings[i].bytes = 0;
	}
    }
}
#ifdef RTE_LIBRTE_KNI
static int
kni_config_network_if (u8 port_id, u8 if_up)
{
  vnet_main_t *vnm = vnet_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword *p;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0)
    {
      clib_warning ("unknown interface");
      return 0;
    }
  else
    {
      xd = vec_elt_at_index (dm->devices, p[0]);
    }

  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
			       if_up ? VNET_HW_INTERFACE_FLAG_LINK_UP |
			       ETH_LINK_FULL_DUPLEX : 0);
  return 0;
}
static int
kni_change_mtu (u8 port_id, unsigned new_mtu)
{
  vnet_main_t *vnm = vnet_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword *p;
  vnet_hw_interface_t *hif;

  p = hash_get (dm->dpdk_device_by_kni_port_id, port_id);
  if (p == 0)
    {
      clib_warning ("unknown interface");
      return 0;
    }
  else
    {
      xd = vec_elt_at_index (dm->devices, p[0]);
    }
  hif = vnet_get_hw_interface (vnm, xd->vlib_hw_if_index);

  hif->max_packet_bytes = new_mtu;

  return 0;
}
#endif
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

#ifdef RTE_LIBRTE_KNI
  if (xd->dev_type == VNET_DPDK_DEV_KNI)
    {
      if (is_up)
	{
	  struct rte_kni_conf conf;
	  struct rte_kni_ops ops;
	  vlib_main_t *vm = vlib_get_main ();
	  vlib_buffer_main_t *bm = vm->buffer_main;
	  memset (&conf, 0, sizeof (conf));
	  snprintf (conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id);
	  conf.mbuf_size = VLIB_BUFFER_DATA_SIZE;
	  memset (&ops, 0, sizeof (ops));
	  ops.port_id = xd->kni_port_id;
	  ops.change_mtu = kni_change_mtu;
	  ops.config_network_if = kni_config_network_if;

	  xd->kni = rte_kni_alloc (bm->pktmbuf_pools[rte_socket_id ()],
				   &conf, &ops);
	  if (!xd->kni)
	    {
	      clib_warning ("failed to allocate kni interface");
	    }
	  else
	    {
	      hif->max_packet_bytes = 1500;	/* kni interface default value */
	      xd->admin_up = 1;
	    }
	}
      else
	{
	  xd->admin_up = 0;
	  rte_kni_release (xd->kni);
	}
      return 0;
    }
#endif
  if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
    {
      if (is_up)
	{
	  if (xd->vu_is_running)
	    vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
					 VNET_HW_INTERFACE_FLAG_LINK_UP |
					 ETH_LINK_FULL_DUPLEX);
	  xd->admin_up = 1;
	}
      else
	{
	  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
	  xd->admin_up = 0;
	}

      return 0;
    }

  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->admin_up == 0)
	rv = rte_eth_dev_start (xd->device_index);

      if (xd->promisc)
	rte_eth_promiscuous_enable (xd->device_index);
      else
	rte_eth_promiscuous_disable (xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->admin_up = 1;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
	xd->admin_up = 0;
      else
	xd->admin_up = ~0;

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);

      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
	rte_eth_dev_stop (xd->device_index);
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
					  u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
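
/*
 * This hook is reached through the device-class rx_redirect_to_node binding
 * below. A hedged usage sketch, assuming the generic vnet wrapper
 * vnet_hw_interface_rx_redirect_to_node() and a hypothetical my_node_index:
 *
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index, my_node_index);
 *   ...
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index, ~0); // undo
 */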
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;

  if (xd->dev_type != VNET_DPDK_DEV_ETH)
    return 0;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    return 0;

  if (t->sub.eth.flags.no_tags == 1)
    return 0;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    return clib_error_return (0, "unsupported VLAN setup");

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
    return clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			      xd->device_index, r);

  if ((r = rte_eth_dev_vlan_filter (xd->device_index,
				    t->sub.eth.outer_vlan_id, is_add)))
    return clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			      xd->device_index, r);

  return 0;
}
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .no_flatten_output_chains = 1,
  .name_renumber = dpdk_device_renumber,
};
void dpdk_set_flowcontrol_callback (vlib_main_t * vm,
				    dpdk_flowcontrol_callback_t callback)
{
  dpdk_main.flowcontrol_callback = callback;
}
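
/*
 * Registration sketch (hypothetical traffic manager). The callback arguments
 * shown match the invocation in dpdk_interface_tx() above: vm, the hardware
 * interface index, and the number of packets still queued on the tx ring.
 *
 *   static void
 *   my_flowcontrol_cb (vlib_main_t * vm, u32 hw_if_index, u32 n_queued)
 *   {
 *     // flow the interface off; poll dpdk_interface_tx_vector() to flow on
 *   }
 *
 *   dpdk_set_flowcontrol_callback (vm, my_flowcontrol_cb);
 */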
#define UP_DOWN_FLAG_EVENT 1

u32 dpdk_get_admin_up_down_in_progress (void)
{
  return dpdk_main.admin_up_down_in_progress;
}
static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 index;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      for (index = 0; index < vec_len (event_data); index++)
	{
	  sw_if_index = event_data[index] >> 32;
	  flags = (u32) event_data[index];

	  switch (event_type)
	    {
	    case UP_DOWN_FLAG_EVENT:
	      error = vnet_sw_interface_set_flags (vnet_get_main (),
						   sw_if_index, flags);
	      clib_error_report (error);
	      break;
	    }
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
    .function = admin_up_down_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "admin-up-down-process",
    .process_log2_n_stack_bytes = 17,	// 128KB
};
/*
 * Asynchronously invoke vnet_sw_interface_set_flags via the admin_up_down
 * process. Useful for avoiding long blocking delays (>150ms) in the dpdk
 * driver.
 * WARNING: when posting this event, no other interface-related calls should
 * be made (e.g. vnet_create_sw_interface()) while the event is being
 * processed (admin_up_down_in_progress). This is required in order to avoid
 * race conditions in manipulating interface data structures.
 */
void post_sw_interface_set_flags (vlib_main_t * vm, u32 sw_if_index,
				  u32 flags)
{
  vlib_process_signal_event
    (vm, admin_up_down_process_node.index,
     UP_DOWN_FLAG_EVENT,
     (((uword) sw_if_index << 32) | flags));
}
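
/*
 * Usage sketch (hypothetical caller, e.g. a CLI handler that must not
 * block): sw_if_index and flags are packed into one event word, exactly as
 * unpacked by admin_up_down_process() above.
 *
 *   post_sw_interface_set_flags (vm, sw_if_index,
 *                                VNET_SW_INTERFACE_FLAG_ADMIN_UP);
 */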
/*
 * Called by the dpdk driver's rte_delay_us() function.
 * Return 0 to have the dpdk do a regular delay loop.
 * Return 1 to skip the delay loop because we are suspending
 * the calling vlib process instead.
 */
int rte_delay_us_override (unsigned us)
{
  vlib_main_t *vm;

  /* Don't bother intercepting for short delays */
  if (us < 10)
    return 0;

  /*
   * Only intercept if we are in a vlib process.
   * If we are called from a vlib worker thread or the vlib main
   * thread then do not intercept. (Must not be called from an
   * independent pthread).
   */
  if (os_get_cpu_number () == 0)
    {
      /*
       * We're in the vlib main thread or a vlib process. Make sure
       * the process is running and we're not still initializing.
       */
      vm = vlib_get_main ();
      if (vlib_in_process_context (vm))
	{
	  /* Only suspend for the admin_down_process */
	  vlib_process_t *proc = vlib_get_current_process (vm);
	  if (!(proc->flags & VLIB_PROCESS_IS_RUNNING) ||
	      (proc->node_runtime.function != admin_up_down_process))
	    return 0;

	  f64 delay = 1e-6 * us;
	  vlib_process_suspend (vm, delay);
	  return 1;
	}
    }
  return 0;			// no override
}
/*
 * Return a copy of the DPDK port stats in dest.
 */
clib_error_t *
dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats *dest)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (!dest)
    return clib_error_return (0, "Missing or NULL argument");
  if (!xd)
    return clib_error_return (0,
			      "Unable to get DPDK device from HW interface");

  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (dest, &xd->stats, sizeof (xd->stats));
  return 0;
}
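
/*
 * Usage sketch (hypothetical caller):
 *
 *   struct rte_eth_stats stats;
 *   clib_error_t *err = dpdk_get_hw_interface_stats (hw_if_index, &stats);
 *   if (err == 0)
 *     clib_warning ("ipackets %llu opackets %llu",
 *                   (unsigned long long) stats.ipackets,
 *                   (unsigned long long) stats.opackets);
 */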
/*
 * Return the number of dpdk mbufs
 */
u32 dpdk_num_mbufs (void)
{
  dpdk_main_t *dm = &dpdk_main;

  return dm->num_mbufs;
}

/*
 * Return the io_thread_release
 */
int dpdk_io_thread_release (void)
{
  dpdk_main_t *dm = &dpdk_main;

  return dm->io_thread_release;
}
/*
 * Return the pmd type for a given hardware interface
 */
dpdk_pmd_t dpdk_get_pmd_type (vnet_hw_interface_t * hi)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;

  assert (hi);

  xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  assert (xd);

  return xd->pmd;
}

/*
 * Return the cpu socket for a given hardware interface
 */
i8 dpdk_get_cpu_socket (vnet_hw_interface_t * hi)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;

  assert (hi);

  xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  assert (xd);

  return xd->cpu_socket;
}