/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
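
/*
 * The X-macro above expands once into the DPDK_TX_FUNC_ERROR_* enum
 * values and once into the parallel string table handed to the device
 * class registration at the bottom of this file.
 */

/*
 * Add or remove a secondary MAC address on a device.  The address is
 * passed straight through to the PMD; any non-zero DPDK return code is
 * surfaced as a clib error.
 */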
static clib_error_t *
dpdk_add_del_mac_address (vnet_hw_interface_t * hi,
                          const u8 * address, u8 is_add)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (is_add)
    error = rte_eth_dev_mac_addr_add (xd->port_id,
                                      (struct rte_ether_addr *) address, 0);
  else
    error = rte_eth_dev_mac_addr_remove (xd->port_id,
                                         (struct rte_ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address add/del failed: %d", error);

  return NULL;
}
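
/*
 * Replace the primary MAC address of a device and, on success, cache the
 * new address in the per-device state.
 */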
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
                      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  /* 'address' is a pointer here, so sizeof (address) would copy
     pointer-size bytes; copy the 6-byte Ethernet address instead */
  vec_add (xd->default_mac_address, address, 6);
  return NULL;
}
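
/*
 * Capture a TX trace record for one buffer: copies of the rte_mbuf
 * header, the vlib_buffer_t metadata and the leading packet bytes, to be
 * rendered later by format_dpdk_tx_trace.
 */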
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
                      dpdk_device_t * xd, u16 queue_id,
                      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
                    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
                    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
                    sizeof (t0->data));
}
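
/*
 * Bring the rte_mbuf header(s) in sync with the vlib_buffer_t chain:
 * segment lengths, data offsets and, when maybe_multiseg is set, the
 * next pointers and total segment count.  Buffers that did not originate
 * from DPDK get their mbuf header reset first so stale state cannot leak
 * through.
 */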
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
                        int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;
  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    rte_pktmbuf_reset (mb);

  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
        rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
        mb->pool =
          dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
/*
 * This function calls the dpdk's tx_burst function to transmit the packets.
 * It manages a lock per-device if the device does not support multiple
 * queues.  It returns the number of untransmitted packets; if all packets
 * are transmitted (the normal case), the function returns 0.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
                                dpdk_device_t * xd,
                                struct rte_mbuf **mb, u32 n_left)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_retry = 16;
  int n_sent = 0;
  int queue_id = vm->thread_index;

  do
    {
      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
          while (clib_atomic_test_and_set (xd->lockp[queue_id]))
            /* busy: try the next queue */
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }

      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS))    /* HQoS ON */
        {
          /* no wrap, transmit in one burst */
          dpdk_device_hqos_per_worker_thread_t *hqos =
            &xd->hqos_wt[vm->thread_index];

          ASSERT (hqos->swq != NULL);

          dpdk_hqos_metadata_set (hqos, mb, n_left);
          n_sent = rte_ring_sp_enqueue_burst (hqos->swq, (void **) mb,
                                              n_left, 0);
        }
      else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
        {
          /* no wrap, transmit in one burst */
          n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
        }
      else
        {
          ASSERT (0);
          n_sent = 0;
        }

      if (PREDICT_FALSE (xd->lockp != 0))
        clib_atomic_release (xd->lockp[queue_id]);

      if (PREDICT_FALSE (n_sent < 0))
        {
          // emit non-fatal message, bump counter
          vnet_main_t *vnm = dm->vnet_main;
          vnet_interface_main_t *im = &vnm->interface_main;
          u32 node_index;

          node_index = vec_elt_at_index (im->hw_interfaces,
                                         xd->hw_if_index)->tx_node_index;

          vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
          clib_warning ("rte_eth_tx_burst[%d]: error %d",
                        xd->port_id, n_sent);
          return n_left;        // untransmitted packets
        }
      n_retry--;                /* bound the number of retries */
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}
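
/*
 * Prefetch the rte_mbuf header (written below, hence STORE) and the first
 * cache line of the corresponding vlib_buffer_t (only read, hence LOAD).
 */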
static_always_inline void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
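
/*
 * Translate VNET offload requests (IP/TCP/UDP checksum, GSO) into the
 * rte_mbuf ol_flags and l2/l3/l4 length fields that PMDs expect.  The
 * common no-offload case returns immediately.
 */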
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
                        struct rte_mbuf *mb)
{
  u32 ip_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
  u32 tcp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 udp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 tso = b->flags & VNET_BUFFER_F_GSO;
  u64 ol_flags;

  /* Is there any work for us? */
  if (PREDICT_TRUE ((ip_cksum | tcp_cksum | udp_cksum | tso) == 0))
    return;

  mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
  mb->l3_len = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l3_hdr_offset;
  mb->outer_l3_len = 0;
  mb->outer_l2_len = 0;
  ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
  ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;
  ol_flags |= tso ? (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG) : 0;

  if (tso)
    {
      mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz;
      mb->tso_segsz = vnet_buffer2 (b)->gso_size;
    }

  mb->ol_flags |= ol_flags;

  /* we are trying to help compiler here by using local ol_flags with known
     state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}
/*
 * Transmits the packets on the frame to the interface associated with the
 * node.  It first copies packets on the frame to a per-thread array
 * containing the rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
                                             vlib_node_runtime_t * node,
                                             vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 thread_index = vm->thread_index;
  int queue_id = thread_index;
  u32 tx_pkts = 0, all_or_flags = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
                                                  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  /* calculate rte_mbuf pointers out of buffer indices */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
                                (void **) ptd->mbufs, n_packets,
                                -(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;
#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 8)
    {
      u32 or_flags;

      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      all_or_flags |= or_flags;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          dpdk_validate_rte_mbuf (vm, b[0], 1);
          dpdk_validate_rte_mbuf (vm, b[1], 1);
          dpdk_validate_rte_mbuf (vm, b[2], 1);
          dpdk_validate_rte_mbuf (vm, b[3], 1);
        }
      else
        {
          dpdk_validate_rte_mbuf (vm, b[0], 0);
          dpdk_validate_rte_mbuf (vm, b[1], 0);
          dpdk_validate_rte_mbuf (vm, b[2], 0);
          dpdk_validate_rte_mbuf (vm, b[3], 0);
        }

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
                         (or_flags &
                          (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_IP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))))
        {
          dpdk_buffer_tx_offload (xd, b[0], mb[0]);
          dpdk_buffer_tx_offload (xd, b[1], mb[1]);
          dpdk_buffer_tx_offload (xd, b[2], mb[2]);
          dpdk_buffer_tx_offload (xd, b[3], mb[3]);
        }

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
        }

      mb += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      vlib_buffer_t *b2, *b3;
      u32 or_flags;

      CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, STORE);
      b2 = vlib_buffer_from_rte_mbuf (mb[2]);
      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, LOAD);
      b3 = vlib_buffer_from_rte_mbuf (mb[3]);
      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, LOAD);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);

      or_flags = b[0]->flags | b[1]->flags;
      all_or_flags |= or_flags;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          dpdk_validate_rte_mbuf (vm, b[0], 1);
          dpdk_validate_rte_mbuf (vm, b[1], 1);
        }
      else
        {
          dpdk_validate_rte_mbuf (vm, b[0], 0);
          dpdk_validate_rte_mbuf (vm, b[1], 0);
        }

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
                         (or_flags &
                          (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_IP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))))
        {
          dpdk_buffer_tx_offload (xd, b[0], mb[0]);
          dpdk_buffer_tx_offload (xd, b[1], mb[1]);
        }

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
        }

      mb += 2;
      n_left -= 2;
    }
#endif

  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      all_or_flags |= b[0]->flags;
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
          dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }
  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      tx_pkts -= n_left;
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                             VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
                                     n_left);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
                        n_left);

      while (n_left--)
        rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}
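
/*
 * Reset the basic and extended DPDK counters of a device; wired into the
 * device class below as .clear_counters.
 */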
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  rte_eth_stats_reset (xd->port_id);
  rte_eth_xstats_reset (xd->port_id);
}
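
/*
 * Admin up/down callback.  On "up" the port is started if it is not
 * already running and the counters and link state are refreshed; on
 * "down" the hardware interface flags are cleared first, then the port
 * is stopped.
 */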
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
        dpdk_device_start (xd);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (dm->vlib_main);
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
        dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                              u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
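
/*
 * Sub-interface add/del callback.  VLAN filters are programmed into the
 * NIC only for the IXGBE VF and I40E VF PMDs; for other PMDs only the
 * sub-interface accounting below takes place.
 */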
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
                             u32 hw_if_index,
                             struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
                               xd->port_id, r);
      goto done;
    }

  if ((r = rte_eth_dev_vlan_filter (xd->port_id,
                                    t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
                               xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .mac_addr_add_del_function = dpdk_add_del_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
};
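
/*
 * Process node used to apply interface flag changes from process context.
 * Each UP_DOWN_FLAG_EVENT carries a (sw_if_index, flags) pair, and
 * admin_up_down_in_progress lets other code detect that such a change is
 * being applied.
 */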
#define UP_DOWN_FLAG_EVENT 1

static uword
admin_up_down_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
        {
        case UP_DOWN_FLAG_EVENT:
          if (vec_len (event_data) == 2)
            {
              sw_if_index = event_data[0];
              flags = event_data[1];
              error = vnet_sw_interface_set_flags (vnet_get_main (),
                                                   sw_if_index, flags);
              clib_error_report (error);
            }
          break;
        }

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;                     /* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,     /* 2^17 = 128KB stack */
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */