/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                    \
  _(BAD_RETVAL, "DPDK tx function returned an error") \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")
typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;
static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
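/* vnet callback: add or remove a secondary unicast MAC address on the
   underlying DPDK port. */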
static clib_error_t *
dpdk_add_del_mac_address (vnet_hw_interface_t * hi,
			  const u8 * address, u8 is_add)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (is_add)
    error = rte_eth_dev_mac_addr_add (xd->port_id,
				      (struct rte_ether_addr *) address, 0);
  else
    error = rte_eth_dev_mac_addr_remove (xd->port_id,
					 (struct rte_ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address add/del failed: %d", error);

  return NULL;
}

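/* vnet callback: replace the port's primary MAC address and cache a copy
   in xd->default_mac_address. */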
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
		      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  vec_add (xd->default_mac_address, address, sizeof (mac_address_t));
  return NULL;
}

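/* Record a TX trace entry: snapshots the rte_mbuf header, the
   vlib_buffer_t metadata and the leading bytes of packet data. */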
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
		      dpdk_device_t * xd, u16 queue_id,
		      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
		    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
		    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
		    sizeof (t0->data));
}

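/* Bring the rte_mbuf header(s) in sync with the vlib_buffer_t chain
   before handing the packet to the PMD: segment lengths, data offsets
   and, for chained buffers, next pointers and nb_segs. Buffers that did
   not originate from DPDK (VLIB_BUFFER_EXT_HDR_VALID clear) get their
   mbuf header re-initialized first. */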
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
			int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;
  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    rte_pktmbuf_reset (mb);

  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}

/*
 * This function calls DPDK's tx_burst function to transmit the packets.
 * It manages a per-queue lock if the device does not support multiple
 * queues. It returns the number of untransmitted packets; if all packets
 * are transmitted (the normal case), the function returns 0.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
				dpdk_device_t * xd,
				struct rte_mbuf **mb, u32 n_left)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_tx_queue_t *txq;
  u32 n_retry;
  int n_sent = 0;
  int queue_id;

  n_retry = 16;
  queue_id = vm->thread_index % xd->tx_q_used;
  txq = vec_elt_at_index (xd->tx_queues, queue_id);

  do
    {
      clib_spinlock_lock_if_init (&txq->lock);

      if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
	{
	  /* no wrap, transmit in one burst */
	  n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
	  n_retry--;
	}

      clib_spinlock_unlock_if_init (&txq->lock);

      if (PREDICT_FALSE (n_sent < 0))
	{
	  // emit non-fatal message, bump counter
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->hw_if_index)->tx_node_index;

	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  return n_left;	// untransmitted packets
	}
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}

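/* Prefetch the rte_mbuf header for write (the TX path rewrites it) and
   the first cache line of the vlib_buffer_t for read. */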
static_always_inline __clib_unused void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}

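/* Translate vnet offload requests (IP/TCP/UDP checksum, GSO) into DPDK
   ol_flags and l2/l3/l4 lengths on the mbuf. Cheap early-out when the
   buffer requests no offloads. */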
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
			struct rte_mbuf *mb)
{
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
  u32 oflags, ip_cksum, tcp_cksum, udp_cksum;
  u64 ol_flags;

  /* Is there any work for us? */
  if (PREDICT_TRUE (((b->flags & VNET_BUFFER_F_OFFLOAD) | tso) == 0))
    return;

  oflags = vnet_buffer2 (b)->oflags;
  ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
  tcp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
  mb->l3_len = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l3_hdr_offset;
  mb->outer_l3_len = 0;
  mb->outer_l2_len = 0;
  ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
  ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;

  if (tso)
    {
      mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz;
      mb->tso_segsz = vnet_buffer2 (b)->gso_size;
      /* ensure packet is large enough to require tso */
      max_pkt_len = mb->l2_len + mb->l3_len + mb->l4_len + mb->tso_segsz;
      if (mb->tso_segsz != 0 && mb->pkt_len > max_pkt_len)
	ol_flags |= (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG);
    }

  mb->ol_flags |= ol_flags;

  /* we are trying to help the compiler here by using local ol_flags with
     known state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}

/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies the packets on the frame into a per-thread array
 * containing the rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 thread_index = vm->thread_index;
  int queue_id = thread_index;
  u32 tx_pkts = 0, all_or_flags = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  /* calculate rte_mbuf pointers out of buffer indices */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
				(void **) ptd->mbufs, n_packets,
				-(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;
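
  /* The constant offset above works because, in this plugin's buffer
     layout, each rte_mbuf header sits in buffer memory immediately ahead
     of its vlib_buffer_t (see vlib_buffer_from_rte_mbuf below). */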
#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 8)
    {
      u32 or_flags;

      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      all_or_flags |= or_flags;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	  dpdk_validate_rte_mbuf (vm, b[2], 1);
	  dpdk_validate_rte_mbuf (vm, b[3], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	  dpdk_validate_rte_mbuf (vm, b[2], 0);
	  dpdk_validate_rte_mbuf (vm, b[3], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	  dpdk_buffer_tx_offload (xd, b[2], mb[2]);
	  dpdk_buffer_tx_offload (xd, b[3], mb[3]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
	}

      mb += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      vlib_buffer_t *b2, *b3;
      u32 or_flags;

      CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, STORE);
      b2 = vlib_buffer_from_rte_mbuf (mb[2]);
      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, LOAD);
      b3 = vlib_buffer_from_rte_mbuf (mb[3]);
      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, LOAD);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);

      or_flags = b[0]->flags | b[1]->flags;
      all_or_flags |= or_flags;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	}

      mb += 2;
      n_left -= 2;
    }
#endif
  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      all_or_flags |= b[0]->flags;
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }
  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      tx_pkts -= n_left;
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			     VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
				     n_left);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			n_left);

      while (n_left--)
	rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}

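/* Reset both the basic and extended (xstats) hardware counters for the
   port backing this device instance. */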
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  rte_eth_stats_reset (xd->port_id);
  rte_eth_xstats_reset (xd->port_id);
}

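/* Admin up/down callback: starts or stops the underlying DPDK device and
   keeps DPDK_DEVICE_FLAG_ADMIN_UP, counters and link state in sync. */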
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	dpdk_device_start (xd);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (dm->vlib_main);
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
	dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}

/*
 * Dynamically redirect all packets from a specific interface
 * to the specified node.
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}

static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF */
  if (xd->pmd != VNET_DPDK_PMD_IXGBEVF)
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

  if ((r = rte_eth_dev_vlan_filter (xd->port_id,
				    t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}

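/* Program the NIC's RSS redirection table (RETA) so received flows are
   spread only across the RX queues set in 'bitmap'; the rest of the
   table is filled round-robin from the valid queues. */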
static clib_error_t *
dpdk_interface_set_rss_queues (struct vnet_main_t *vnm,
			       struct vnet_hw_interface_t *hi,
			       clib_bitmap_t * bitmap)
{
  dpdk_main_t *xm = &dpdk_main;
  u32 hw_if_index = hi->hw_if_index;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_error_t *err = 0;
  struct rte_eth_rss_reta_entry64 *reta_conf = NULL;
  struct rte_eth_dev_info dev_info;
  u16 *reta = NULL;
  u16 *valid_queue = NULL;
  u16 valid_queue_count = 0;
  uint32_t i, j;
  uint32_t ret;

  rte_eth_dev_info_get (xd->port_id, &dev_info);

  /* parameter check */
  if (clib_bitmap_count_set_bits (bitmap) == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  if (clib_bitmap_count_set_bits (bitmap) > dev_info.nb_rx_queues)
    {
      err = clib_error_return (0, "too many rss queues");
      goto done;
    }

  /* new RETA */
  reta = clib_mem_alloc (dev_info.reta_size * sizeof (*reta));
  if (reta == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta, 0, dev_info.reta_size * sizeof (*reta));

  valid_queue_count = 0;
  clib_bitmap_foreach (i, bitmap) {
    if (i >= dev_info.nb_rx_queues)
      {
	err = clib_error_return (0, "illegal queue number");
	goto done;
      }
    reta[valid_queue_count++] = i;
  }

  /* check valid_queue_count not zero, make coverity happy */
  if (valid_queue_count == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  /* fill the rest of the reta table with the valid queues, round-robin */
  valid_queue = reta;
  for (i = valid_queue_count, j = 0; i < dev_info.reta_size; i++, j++)
    {
      j = j % valid_queue_count;
      reta[i] = valid_queue[j];
    }

  /* update reta table */
  reta_conf =
    (struct rte_eth_rss_reta_entry64 *) clib_mem_alloc (dev_info.reta_size /
							RTE_RETA_GROUP_SIZE *
							sizeof (*reta_conf));
  if (reta_conf == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta_conf, 0,
	       dev_info.reta_size / RTE_RETA_GROUP_SIZE *
	       sizeof (*reta_conf));

  for (i = 0; i < dev_info.reta_size; i++)
    {
      uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
      uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;

      reta_conf[reta_id].mask = UINT64_MAX;
      reta_conf[reta_id].reta[reta_pos] = reta[i];
    }

  ret =
    rte_eth_dev_rss_reta_update (xd->port_id, reta_conf, dev_info.reta_size);
  if (ret)
    {
      err = clib_error_return (0, "rte_eth_dev_rss_reta_update err %d", ret);
      goto done;
    }

done:
  if (reta)
    clib_mem_free (reta);
  if (reta_conf)
    clib_mem_free (reta_conf);

  return err;
}

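/* Switch an RX queue between polling and interrupt mode by toggling the
   per-queue RX interrupt; only valid when the device advertises
   DPDK_DEVICE_FLAG_INT_SUPPORTED. */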
static clib_error_t *
dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
			       vnet_hw_if_rx_mode mode)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  int rv = 0;

  if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
    return clib_error_return (0, "unsupported op (is the interface up?)");
  if (mode == VNET_HW_IF_RX_MODE_POLLING)
    rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
  else
    rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
  if (rv)
    return clib_error_return (0, "dpdk_interface_rx_mode_change err %d", rv);

  return 0;
}

VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .mac_addr_add_del_function = dpdk_add_del_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
  .set_rss_queues_function = dpdk_interface_set_rss_queues,
  .rx_mode_change_function = dpdk_interface_rx_mode_change,
};

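/* Process node that applies interface flag changes from process context;
   an UP_DOWN_FLAG_EVENT carries the pair (sw_if_index, flags). The large
   stack size configured below suggests this exists so driver start/stop
   paths can run safely outside the caller's context. */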
#define UP_DOWN_FLAG_EVENT 1

static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  if (vec_len (event_data) == 2)
	    {
	      sw_if_index = event_data[0];
	      flags = event_data[1];
	      error = vnet_sw_interface_set_flags (vnet_get_main (),
						   sw_if_index, flags);
	      clib_error_report (error);
	    }
	  break;
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}

VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,	/* 128KB (2^17 bytes) */
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */