/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#include <vlib/unix/unix.h>
#define foreach_dpdk_tx_func_error		\
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
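
/*
 * Add or remove a secondary MAC address on a device. The request is
 * forwarded to the PMD via rte_eth_dev_mac_addr_add/remove.
 */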
static clib_error_t *
dpdk_add_del_mac_address (vnet_hw_interface_t * hi,
			  const u8 * address, u8 is_add)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (is_add)
    error = rte_eth_dev_mac_addr_add (xd->port_id,
				      (struct rte_ether_addr *) address, 0);
  else
    error = rte_eth_dev_mac_addr_remove (xd->port_id,
					 (struct rte_ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address add/del failed: %d", error);

  return 0;
}
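
/*
 * Replace the primary MAC address of a device and cache the new
 * address in the device state.
 */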
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
		      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  vec_add (xd->default_mac_address, address, sizeof (mac_address_t));
  return 0;
}
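
/*
 * Record a TX trace entry for a buffer: copies the rte_mbuf header,
 * the vlib buffer metadata and the first bytes of packet data into
 * the trace so "show trace" can display them later.
 */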
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
		      dpdk_device_t * xd, u16 queue_id,
		      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
		    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
		    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
		    sizeof (t0->data));
}
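
/*
 * Synchronize the rte_mbuf header(s) of a (possibly chained) vlib
 * buffer with the vlib metadata before handing the chain to the PMD.
 * With maybe_multiseg set, the whole chain is walked and re-linked.
 */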
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
			int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;
  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    rte_pktmbuf_reset (mb);

  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
/*
 * This function calls DPDK's tx_burst function to transmit the packets.
 * It takes the per-queue lock if the queue is shared between threads.
 * It returns the number of packets left untransmitted; if all packets
 * are transmitted (the normal case), it returns 0.
 */
static_always_inline u32
tx_burst_vector_internal (vlib_main_t *vm, dpdk_device_t *xd,
			  struct rte_mbuf **mb, u32 n_left, int queue_id,
			  u8 is_shared)
{
  dpdk_tx_queue_t *txq;
  u32 n_retry = 16;
  int n_sent = 0;

  txq = vec_elt_at_index (xd->tx_queues, queue_id);

  do
    {
      if (is_shared)
	clib_spinlock_lock (&txq->lock);

      /* no wrap, transmit in one burst */
      n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);

      if (is_shared)
	clib_spinlock_unlock (&txq->lock);

      n_retry--;
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}
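
/*
 * Prefetch the rte_mbuf header (about to be written) and the
 * corresponding vlib buffer metadata (read below).
 */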
static_always_inline __clib_unused void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE);
  clib_prefetch_load (b);
}
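
/*
 * Translate vlib offload requests (checksum, TSO, VXLAN tunneling)
 * attached to a buffer into rte_mbuf ol_flags and header-length
 * fields understood by the PMD.
 */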
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
			struct rte_mbuf *mb)
{
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
  u32 ip_cksum, tcp_cksum, udp_cksum, outer_hdr_len = 0;
  u32 outer_ip_cksum, vxlan_tunnel;
  u64 ol_flags;
  vnet_buffer_oflags_t oflags = 0;

  /* Is there any work for us? */
  if (PREDICT_TRUE (((b->flags & VNET_BUFFER_F_OFFLOAD) | tso) == 0))
    return;

  oflags = vnet_buffer (b)->oflags;
  ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
  tcp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
  outer_ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM;
  vxlan_tunnel = oflags & VNET_BUFFER_OFFLOAD_F_TNL_VXLAN;

  ol_flags = is_ip4 ? RTE_MBUF_F_TX_IPV4 : RTE_MBUF_F_TX_IPV6;
  ol_flags |= ip_cksum ? RTE_MBUF_F_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? RTE_MBUF_F_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? RTE_MBUF_F_TX_UDP_CKSUM : 0;

  if (vxlan_tunnel)
    {
      ol_flags |= outer_ip_cksum ?
	RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM :
	RTE_MBUF_F_TX_OUTER_IPV6;
      ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN;
      mb->l2_len =
	vnet_buffer (b)->l3_hdr_offset - vnet_buffer2 (b)->outer_l4_hdr_offset;
      mb->l3_len =
	vnet_buffer (b)->l4_hdr_offset - vnet_buffer (b)->l3_hdr_offset;
      mb->outer_l2_len =
	vnet_buffer2 (b)->outer_l3_hdr_offset - b->current_data;
      mb->outer_l3_len = vnet_buffer2 (b)->outer_l4_hdr_offset -
	vnet_buffer2 (b)->outer_l3_hdr_offset;
      outer_hdr_len = mb->outer_l2_len + mb->outer_l3_len;
    }
  else
    {
      mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
      mb->l3_len =
	vnet_buffer (b)->l4_hdr_offset - vnet_buffer (b)->l3_hdr_offset;
      mb->outer_l2_len = 0;
      mb->outer_l3_len = 0;
    }

  if (tso)
    {
      mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz;
      mb->tso_segsz = vnet_buffer2 (b)->gso_size;
      /* ensure packet is large enough to require tso */
      max_pkt_len =
	outer_hdr_len + mb->l2_len + mb->l3_len + mb->l4_len + mb->tso_segsz;
      if (mb->tso_segsz != 0 && mb->pkt_len > max_pkt_len)
	ol_flags |=
	  (tcp_cksum ? RTE_MBUF_F_TX_TCP_SEG : RTE_MBUF_F_TX_UDP_SEG);
    }

  mb->ol_flags |= ol_flags;

  /* we are trying to help compiler here by using local ol_flags with known
     state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}
/*
 * Transmits the packets on the frame to the interface associated with
 * the node. It first copies the frame's buffer indices into a
 * per-thread array of rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (f);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 thread_index = vm->thread_index;
  int queue_id = tf->queue_id;
  u8 is_shared = tf->shared_queue;
  u32 tx_pkts = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  /* calculate rte_mbuf pointers out of buffer indices */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
				(void **) ptd->mbufs, n_packets,
				-(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;
#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 8)
    {
      u32 or_flags;

      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	  dpdk_validate_rte_mbuf (vm, b[2], 1);
	  dpdk_validate_rte_mbuf (vm, b[3], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	  dpdk_validate_rte_mbuf (vm, b[2], 0);
	  dpdk_validate_rte_mbuf (vm, b[3], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	  dpdk_buffer_tx_offload (xd, b[2], mb[2]);
	  dpdk_buffer_tx_offload (xd, b[3], mb[3]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
	}

      mb += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      vlib_buffer_t *b2, *b3;
      u32 or_flags;

      clib_prefetch_store (mb[2]);
      clib_prefetch_store (mb[3]);
      b2 = vlib_buffer_from_rte_mbuf (mb[2]);
      clib_prefetch_load (b2);
      b3 = vlib_buffer_from_rte_mbuf (mb[3]);
      clib_prefetch_load (b3);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);

      or_flags = b[0]->flags | b[1]->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	}

      mb += 2;
      n_left -= 2;
    }
#endif
  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }
  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets, queue_id,
				     is_shared);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      tx_pkts -= n_left;
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			     VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
				     n_left);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			n_left);

      while (n_left--)
	rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}
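
/*
 * Reset both the basic and the extended (xstats) hardware counters
 * for a port.
 */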
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  rte_eth_stats_reset (xd->port_id);
  rte_eth_xstats_reset (xd->port_id);
}
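
/*
 * Bring a device admin-up or admin-down: starts/stops the DPDK port
 * and keeps the DPDK_DEVICE_FLAG_ADMIN_UP flag in sync.
 */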
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	dpdk_device_start (xd);
      if (vec_len (xd->errors))
	return clib_error_create ("Interface start failed");
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (vlib_get_main ());
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
	dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}
/*
 * Dynamically redirect all packets from a specific interface
 * to the specified node.
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), dpdk_input_node.index, node_index);
}
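
/*
 * Add or delete a sub-interface. For PMDs that support VLAN
 * programming this also installs a hardware VLAN filter for the
 * sub-interface's outer VLAN id.
 */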
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  /* currently we program VLANS only for IXGBE VF */
  if (xd->driver->program_vlans == 0)
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->port_id,
				t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}
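
/*
 * Rebuild the device's RSS redirection table (RETA) so that only the
 * RX queues set in the bitmap receive traffic; the valid queues are
 * repeated round-robin to fill the whole table.
 */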
static clib_error_t *
dpdk_interface_set_rss_queues (struct vnet_main_t *vnm,
			       struct vnet_hw_interface_t *hi,
			       clib_bitmap_t * bitmap)
{
  dpdk_main_t *xm = &dpdk_main;
  u32 hw_if_index = hi->hw_if_index;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_error_t *err = 0;
  struct rte_eth_rss_reta_entry64 *reta_conf = NULL;
  struct rte_eth_dev_info dev_info;
  u16 *reta = NULL;
  u16 *valid_queue = NULL;
  u16 valid_queue_count = 0;
  uint32_t i, j;
  int ret = 0;

  rte_eth_dev_info_get (xd->port_id, &dev_info);

  /* parameter check */
  if (clib_bitmap_count_set_bits (bitmap) == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  if (clib_bitmap_count_set_bits (bitmap) > dev_info.nb_rx_queues)
    {
      err = clib_error_return (0, "too many rss queues");
      goto done;
    }

  /* new RETA */
  reta = clib_mem_alloc (dev_info.reta_size * sizeof (*reta));
  if (reta == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta, 0, dev_info.reta_size * sizeof (*reta));

  valid_queue_count = 0;
  clib_bitmap_foreach (i, bitmap) {
    if (i >= dev_info.nb_rx_queues)
      {
	err = clib_error_return (0, "illegal queue number");
	goto done;
      }
    reta[valid_queue_count++] = i;
  }

  /* check valid_queue_count not zero, make coverity happy */
  if (valid_queue_count == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  /* fill the rest of the table round-robin with the valid queues */
  valid_queue = reta;
  for (i = valid_queue_count, j = 0; i < dev_info.reta_size; i++, j++)
    {
      j = j % valid_queue_count;
      reta[i] = valid_queue[j];
    }

  /* update reta table */
  reta_conf = (struct rte_eth_rss_reta_entry64 *) clib_mem_alloc (
    dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE * sizeof (*reta_conf));
  if (reta_conf == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta_conf, 0,
	       dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE *
	       sizeof (*reta_conf));

  for (i = 0; i < dev_info.reta_size; i++)
    {
      uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
      uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;

      reta_conf[reta_id].mask = UINT64_MAX;
      reta_conf[reta_id].reta[reta_pos] = reta[i];
    }

  ret =
    rte_eth_dev_rss_reta_update (xd->port_id, reta_conf, dev_info.reta_size);
  if (ret)
    {
      err = clib_error_return (0, "rte_eth_dev_rss_reta_update err %d", ret);
      goto done;
    }

done:
  if (reta)
    clib_mem_free (reta);
  if (reta_conf)
    clib_mem_free (reta_conf);

  return err;
}
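
/*
 * Switch an RX queue between polling and interrupt mode. For queues
 * whose interrupt cannot be masked, the epoll file registration is
 * added/removed instead of toggling the device interrupt.
 */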
static clib_error_t *
dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
			       vnet_hw_if_rx_mode mode)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_file_main_t *fm = &file_main;
  dpdk_rx_queue_t *rxq;
  clib_file_t *f;
  int rv = 0;

  if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
    return clib_error_return (0, "unsupported op (is the interface up?)");
  if (mode == VNET_HW_IF_RX_MODE_POLLING &&
      !(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
    }
  else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
  else
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_ADD);
    }

  if (rv)
    return clib_error_return (0, "dpdk_interface_rx_mode_change err %d", rv);

  return 0;
}
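
/*
 * Register the DPDK interface device class with VPP's device layer;
 * this wires the TX function and the callbacks above into vnet.
 */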
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .mac_addr_add_del_function = dpdk_add_del_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
  .set_rss_queues_function = dpdk_interface_set_rss_queues,
  .rx_mode_change_function = dpdk_interface_rx_mode_change,
};
#define UP_DOWN_FLAG_EVENT 1
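
/*
 * Process node which applies interface flag changes from process
 * context. A sketch of how a caller might signal it, assuming the
 * standard vlib process-event API (the actual caller lives elsewhere
 * in the plugin):
 *
 *   uword *d = vlib_process_signal_event_data
 *     (vm, admin_up_down_process_node.index, UP_DOWN_FLAG_EVENT,
 *      2, sizeof (uword));
 *   d[0] = sw_if_index;
 *   d[1] = flags;
 *
 * which produces the two-element event_data vector expected below.
 */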
static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  if (vec_len (event_data) == 2)
	    {
	      sw_if_index = event_data[0];
	      flags = event_data[1];
	      error =
		vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
					     flags);
	      clib_error_report (error);
	    }
	  break;
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17, /* 128KB stack (2^17 bytes) */
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */