/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error \
  _(BAD_RETVAL, "DPDK tx function returned an error") \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
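
/*
 * Device MAC address change callback (registered below as
 * .mac_addr_change_function). Programs the new address into the PMD
 * and, on success, caches it in xd->default_mac_address.
 */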
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
                      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id,
                                            (struct ether_addr *) address);
  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  /* copy the 6-byte MAC; sizeof (address) would be the pointer size */
  vec_add (xd->default_mac_address, address, 6);
  return NULL;
}
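
/*
 * Capture a TX trace record for one buffer: the rte_mbuf header, the
 * vlib_buffer_t metadata and the leading packet bytes are copied into
 * the trace so they can be rendered later by format_dpdk_tx_trace.
 */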
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
                      dpdk_device_t * xd, u16 queue_id,
                      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
                    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
                    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
                    sizeof (t0->data));
}
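
/*
 * Bring the rte_mbuf header(s) in front of a vlib buffer chain in sync
 * with the vlib metadata before the chain is handed to the PMD: buffers
 * that did not originate from DPDK (VLIB_BUFFER_EXT_HDR_VALID clear) get
 * their mbufs reset, then data_off/data_len/pkt_len and nb_segs are
 * rebuilt from the chain.
 */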
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
                        int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    {
      vlib_buffer_t *b2 = b;
      last_mb = mb = rte_mbuf_from_vlib_buffer (b2);
      rte_pktmbuf_reset (mb);
      while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          b2 = vlib_get_buffer (vm, b2->next_buffer);
          mb = rte_mbuf_from_vlib_buffer (b2);
          rte_pktmbuf_reset (mb);
        }
    }

  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);
  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
        mb->pool =
          dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
/*
 * This function calls DPDK's tx_burst function to transmit the packets.
 * It manages a lock per-device if the device does not support multiple
 * queues. It returns the number of untransmitted packets; if all packets
 * are transmitted (the normal case), the function returns 0.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
                                dpdk_device_t * xd,
                                struct rte_mbuf **mb, u32 n_left)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_retry = 16;
  int n_sent = 0;
  int queue_id = vm->thread_index;

  do
    {
      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
          while (clib_atomic_test_and_set (xd->lockp[queue_id]))
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }

      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS))    /* HQoS ON */
        {
          /* no wrap, transmit in one burst */
          dpdk_device_hqos_per_worker_thread_t *hqos =
            &xd->hqos_wt[vm->thread_index];
          ASSERT (hqos->swq != NULL);
          dpdk_hqos_metadata_set (hqos, mb, n_left);
          n_sent = rte_ring_sp_enqueue_burst (hqos->swq, (void **) mb,
                                              n_left, 0);
        }
      else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
        {
          /* no wrap, transmit in one burst */
          n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
        }
      else
        {
          ASSERT (0);
          n_sent = 0;
        }

      if (PREDICT_FALSE (xd->lockp != 0))
        clib_atomic_release (xd->lockp[queue_id]);

      if (PREDICT_FALSE (n_sent < 0))
        {
          // emit non-fatal message, bump counter
          vnet_main_t *vnm = dm->vnet_main;
          vnet_interface_main_t *im = &vnm->interface_main;
          u32 node_index;

          node_index = vec_elt_at_index (im->hw_interfaces,
                                         xd->hw_if_index)->tx_node_index;
          vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
          clib_warning ("rte_eth_tx_burst[%d]: error %d",
                        xd->port_id, n_sent);
          return n_left;        // untransmitted packets
        }
      n_retry--;
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}
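
/*
 * Prefetch both headers of a packet: the rte_mbuf (about to be written,
 * hence STORE) and the vlib_buffer_t that follows it in the same buffer
 * (only read here, hence LOAD).
 */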
static_always_inline void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
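
/*
 * Translate VNET checksum offload flags into DPDK PKT_TX_* ol_flags and
 * fill in the l2_len/l3_len fields the PMD needs. For Intel PMDs flagged
 * with DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, the pseudo-header checksum is
 * additionally prepared via rte_net_intel_cksum_flags_prepare().
 */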
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
                        struct rte_mbuf *mb)
{
  u32 ip_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
  u32 tcp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 udp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u64 ol_flags;

  /* Is there any work for us? */
  if (PREDICT_TRUE ((ip_cksum | tcp_cksum | udp_cksum) == 0))
    return;

  mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
  mb->l3_len = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l3_hdr_offset;
  mb->outer_l3_len = 0;
  mb->outer_l2_len = 0;
  ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
  ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;
  mb->ol_flags |= ol_flags;

  /* we are trying to help the compiler here by using a local ol_flags with
     a known state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a per-thread array
 * containing the rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
                                             vlib_node_runtime_t * node,
                                             vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  u32 thread_index = vm->thread_index;
  int queue_id = thread_index;
  u32 tx_pkts = 0, all_or_flags = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
                                                  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);
  /* TX PCAP tracing */
  if (PREDICT_FALSE (dm->pcap[VLIB_TX].pcap_enable))
    {
      int i;
      for (i = 0; i < n_packets; i++)
        {
          u32 bi0 = from[i];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          if (dm->pcap[VLIB_TX].pcap_sw_if_index == 0 ||
              dm->pcap[VLIB_TX].pcap_sw_if_index
              == vnet_buffer (b0)->sw_if_index[VLIB_TX])
            pcap_add_buffer (&dm->pcap[VLIB_TX].pcap_main, vm, bi0, 512);
        }
    }
  /* calculate rte_mbuf pointers out of buffer indices; each rte_mbuf
     immediately precedes its vlib_buffer_t, hence the negative offset */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
                                (void **) ptd->mbufs, n_packets,
                                -(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;

  while (n_left >= 8)
    {
      u32 or_flags;
      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      all_or_flags |= or_flags;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          dpdk_validate_rte_mbuf (vm, b[0], 1);
          dpdk_validate_rte_mbuf (vm, b[1], 1);
          dpdk_validate_rte_mbuf (vm, b[2], 1);
          dpdk_validate_rte_mbuf (vm, b[3], 1);
        }
      else
        {
          dpdk_validate_rte_mbuf (vm, b[0], 0);
          dpdk_validate_rte_mbuf (vm, b[1], 0);
          dpdk_validate_rte_mbuf (vm, b[2], 0);
          dpdk_validate_rte_mbuf (vm, b[3], 0);
        }
      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
                         (or_flags &
                          (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_IP_CKSUM
                           | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))))
        {
          dpdk_buffer_tx_offload (xd, b[0], mb[0]);
          dpdk_buffer_tx_offload (xd, b[1], mb[1]);
          dpdk_buffer_tx_offload (xd, b[2], mb[2]);
          dpdk_buffer_tx_offload (xd, b[3], mb[3]);
        }
      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
        }

      mb += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      all_or_flags |= b[0]->flags;
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
          dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }
  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      tx_pkts -= n_left;
      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                             VNET_INTERFACE_COUNTER_TX_ERROR);
      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
                                     n_left);
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
                        n_left);
      while (n_left--)
        rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}
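
/*
 * Counter "clear" is implemented as a snapshot: current stats/xstats are
 * copied into last_cleared_* and subsequent displays show the delta.
 */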
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * Set the "last_cleared_stats" to the current stats, so that
   * things appear to clear from a display perspective.
   */
  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy_fast (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
  clib_memcpy_fast (xd->last_cleared_xstats, xd->xstats,
                    vec_len (xd->last_cleared_xstats) *
                    sizeof (xd->last_cleared_xstats[0]));
}
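
/*
 * Admin up/down callback: starts or stops the underlying DPDK device and
 * keeps DPDK_DEVICE_FLAG_ADMIN_UP in sync with the vnet flags.
 */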
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
        dpdk_device_start (xd);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (dm->vlib_main);
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
        dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                              u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
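
/*
 * Sub-interface add/del callback. Bookkeeping runs for every device; the
 * hardware VLAN filter is programmed only for PMDs where this is known to
 * work (IXGBE VF, I40E VF) and only for single-tag exact-match subifs.
 */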
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
                             u32 hw_if_index,
                             struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;
  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    goto done;
  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
                               xd->port_id, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->port_id,
                                t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
                               xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;
  return err;
}
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
};
#define UP_DOWN_FLAG_EVENT 1
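
/*
 * Process node used to apply interface flag changes from process context:
 * an UP_DOWN_FLAG_EVENT carries an (sw_if_index, flags) pair, and
 * admin_up_down_in_progress marks the transition for other code paths.
 */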
static uword
admin_up_down_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type, *event_data = 0;
  u32 sw_if_index, flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
        {
        case UP_DOWN_FLAG_EVENT:
          if (vec_len (event_data) == 2)
            {
              sw_if_index = event_data[0];
              flags = event_data[1];
              error =
                vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
                                             flags);
              clib_error_report (error);
            }
          break;
        }

      vec_reset_length (event_data);
      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;                     /* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,     // 2^17 = 128KB
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */