/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#include <vlib/unix/unix.h>
#define foreach_dpdk_tx_func_error                    \
  _(BAD_RETVAL, "DPDK tx function returned an error") \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
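/*
 * Add or remove a secondary MAC address on the device by handing the
 * address straight to the PMD; 'address' points at 6 bytes of MAC
 * address, and VMDq pool 0 is used for the add case.
 */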
static clib_error_t *
dpdk_add_del_mac_address (vnet_hw_interface_t * hi,
			  const u8 * address, u8 is_add)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (is_add)
    error = rte_eth_dev_mac_addr_add (xd->port_id,
				      (struct rte_ether_addr *) address, 0);
  else
    error = rte_eth_dev_mac_addr_remove (xd->port_id,
					 (struct rte_ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address add/del failed: %d", error);

  return NULL;
}
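/*
 * Replace the primary (default) MAC address of the device, and mirror
 * the new address into xd->default_mac_address so the saved copy stays
 * in sync with what the PMD is actually using.
 */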
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
		      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  vec_add (xd->default_mac_address, address, sizeof (mac_address_t));
  return NULL;
}
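/*
 * Capture a TX trace record for one buffer: a copy of the rte_mbuf
 * header, the vlib_buffer_t metadata and the first bytes of packet
 * data, later decoded by format_dpdk_tx_trace.
 */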
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
		      dpdk_device_t * xd, u16 queue_id,
		      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
		    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
		    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
		    sizeof (t0->data));
}
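/*
 * Bring the rte_mbuf header(s) in sync with the vlib_buffer_t metadata
 * before handing the chain to the PMD. Each vlib buffer is allocated
 * with its rte_mbuf header immediately in front of it, so conversion is
 * plain pointer arithmetic:
 *
 *   mb = ((struct rte_mbuf *) b) - 1;   // rte_mbuf_from_vlib_buffer
 *
 * For chained buffers the mbuf segment list is rebuilt, and shared
 * (ref_count > 1) buffers are pointed at a non-caching mempool,
 * presumably to keep per-lcore mempool caches out of the picture while
 * VPP still holds references to the buffer.
 */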
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
			int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;
  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    rte_pktmbuf_reset (mb);

  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
/*
 * This function calls DPDK's tx_burst function to transmit the packets.
 * It takes the queue's spinlock when one was initialized, i.e. when the
 * device has fewer TX queues than worker threads. It returns the number
 * of untransmitted packets; if all packets are transmitted (the normal
 * case), the function returns 0.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
				dpdk_device_t * xd,
				struct rte_mbuf **mb, u32 n_left)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_tx_queue_t *txq;
  u32 n_retry = 16;
  int n_sent = 0;
  int queue_id;

  queue_id = vm->thread_index % xd->tx_q_used;
  txq = vec_elt_at_index (xd->tx_queues, queue_id);

  do
    {
      clib_spinlock_lock_if_init (&txq->lock);

      if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
	{
	  /* no wrap, transmit in one burst */
	  n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
	  n_retry--;
	}

      clib_spinlock_unlock_if_init (&txq->lock);

      if (PREDICT_FALSE (n_sent < 0))
	{
	  // emit non-fatal message, bump counter
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->hw_if_index)->tx_node_index;

	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  return n_left;	// untransmitted packets
	}
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}
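/*
 * Prefetch one packet's metadata: the rte_mbuf header for write (the
 * TX path updates it) and the first vlib_buffer_t cache line for read.
 */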
static_always_inline __clib_unused void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
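/*
 * Translate VNET offload requests (IP/TCP/UDP checksum, TSO) on a
 * buffer into DPDK PKT_TX_* ol_flags, plus the l2/l3/l4 header lengths
 * the PMD needs to locate the headers it must touch.
 */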
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
			struct rte_mbuf *mb)
{
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
  u32 oflags, ip_cksum, tcp_cksum, udp_cksum;
  u64 ol_flags;

  /* Is there any work for us? */
  if (PREDICT_TRUE (((b->flags & VNET_BUFFER_F_OFFLOAD) | tso) == 0))
    return;

  oflags = vnet_buffer2 (b)->oflags;
  ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
  tcp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
  mb->l3_len = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l3_hdr_offset;
  mb->outer_l3_len = 0;
  mb->outer_l2_len = 0;
  ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
  ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;

  if (tso)
    {
      mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz;
      mb->tso_segsz = vnet_buffer2 (b)->gso_size;
      /* ensure packet is large enough to require tso */
      max_pkt_len = mb->l2_len + mb->l3_len + mb->l4_len + mb->tso_segsz;
      if (mb->tso_segsz != 0 && mb->pkt_len > max_pkt_len)
	ol_flags |= (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG);
    }

  mb->ol_flags |= ol_flags;

  /* we are trying to help compiler here by using local ol_flags with known
     state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first converts the buffer indices on the frame into a per-thread
 * array of rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 thread_index = vm->thread_index;
  int queue_id = thread_index;
  u32 tx_pkts = 0, all_or_flags = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  /* calculate rte_mbuf pointers out of buffer indices */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
				(void **) ptd->mbufs, n_packets,
				-(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;

#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 8)
    {
      u32 or_flags;

      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      all_or_flags |= or_flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	  dpdk_validate_rte_mbuf (vm, b[2], 1);
	  dpdk_validate_rte_mbuf (vm, b[3], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	  dpdk_validate_rte_mbuf (vm, b[2], 0);
	  dpdk_validate_rte_mbuf (vm, b[3], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	  dpdk_buffer_tx_offload (xd, b[2], mb[2]);
	  dpdk_buffer_tx_offload (xd, b[3], mb[3]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
	}

      mb += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      vlib_buffer_t *b2, *b3;
      u32 or_flags;

      CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, STORE);
      b2 = vlib_buffer_from_rte_mbuf (mb[2]);
      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, LOAD);
      b3 = vlib_buffer_from_rte_mbuf (mb[3]);
      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, LOAD);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);

      or_flags = b[0]->flags | b[1]->flags;
      all_or_flags |= or_flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	}

      mb += 2;
      n_left -= 2;
    }
#endif
  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      all_or_flags |= b[0]->flags;

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }

  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      tx_pkts -= n_left;

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			     VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
				     n_left);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			n_left);

      while (n_left--)
	rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}
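/* Reset both the basic (rte_eth_stats) and extended (xstats) NIC counters. */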
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  rte_eth_stats_reset (xd->port_id);
  rte_eth_xstats_reset (xd->port_id);
}
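/*
 * Admin up/down: on up, start the device if it is not already started
 * and refresh counters and link state; on down, clear the hardware
 * interface flags first and then stop the device.
 */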
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	dpdk_device_start (xd);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (dm->vlib_main);
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
	dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
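/*
 * Track sub-interface creation/deletion and, for the IXGBE VF PMD only,
 * program a hardware VLAN filter for single-tagged, exact-match
 * sub-interfaces. On any failure the sub-interface count is rolled back.
 */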
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF */
  if (xd->pmd != VNET_DPDK_PMD_IXGBEVF)
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->port_id,
				t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}
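/*
 * Restrict RSS to the RX queues set in 'bitmap' by rewriting the
 * device's redirection table (RETA). The queues from the bitmap are
 * placed first and the remainder of the table is filled round-robin
 * from that set; e.g. for queues {0,2} and a reta_size of 8 the table
 * becomes 0 2 0 2 0 2 0 2.
 */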
static clib_error_t *
dpdk_interface_set_rss_queues (struct vnet_main_t *vnm,
			       struct vnet_hw_interface_t *hi,
			       clib_bitmap_t * bitmap)
{
  dpdk_main_t *xm = &dpdk_main;
  u32 hw_if_index = hi->hw_if_index;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_error_t *err = 0;
  struct rte_eth_rss_reta_entry64 *reta_conf = NULL;
  struct rte_eth_dev_info dev_info;
  u16 *reta = NULL;
  u16 *valid_queue = NULL;
  u16 valid_queue_count = 0;
  u32 i, j;
  int ret;

  rte_eth_dev_info_get (xd->port_id, &dev_info);

  /* parameter check */
  if (clib_bitmap_count_set_bits (bitmap) == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  if (clib_bitmap_count_set_bits (bitmap) > dev_info.nb_rx_queues)
    {
      err = clib_error_return (0, "too many rss queues");
      goto done;
    }

  /* new RETA */
  reta = clib_mem_alloc (dev_info.reta_size * sizeof (*reta));
  if (reta == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta, 0, dev_info.reta_size * sizeof (*reta));

  valid_queue_count = 0;
  clib_bitmap_foreach (i, bitmap) {
    if (i >= dev_info.nb_rx_queues)
      {
	err = clib_error_return (0, "illegal queue number");
	goto done;
      }
    reta[valid_queue_count++] = i;
  }

  /* check valid_queue_count not zero, make coverity happy */
  if (valid_queue_count == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  /* fill the rest of the table by cycling through the valid queues */
  valid_queue = reta;
  for (i = valid_queue_count, j = 0; i < dev_info.reta_size; i++, j++)
    {
      j = j % valid_queue_count;
      reta[i] = valid_queue[j];
    }

  /* update reta table */
  reta_conf =
    (struct rte_eth_rss_reta_entry64 *) clib_mem_alloc (dev_info.reta_size /
							RTE_RETA_GROUP_SIZE *
							sizeof (*reta_conf));
  if (reta_conf == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }

  clib_memset (reta_conf, 0,
	       dev_info.reta_size / RTE_RETA_GROUP_SIZE *
	       sizeof (*reta_conf));

  for (i = 0; i < dev_info.reta_size; i++)
    {
      uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
      uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;

      reta_conf[reta_id].mask = UINT64_MAX;
      reta_conf[reta_id].reta[reta_pos] = reta[i];
    }

  ret =
    rte_eth_dev_rss_reta_update (xd->port_id, reta_conf, dev_info.reta_size);
  if (ret)
    {
      err = clib_error_return (0, "rte_eth_dev_rss_reta_update err %d", ret);
      goto done;
    }

done:
  if (reta)
    clib_mem_free (reta);
  if (reta_conf)
    clib_mem_free (reta_conf);

  return err;
}
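/*
 * Switch an RX queue between polling and interrupt mode. For devices
 * with maskable interrupts this toggles rte_eth_dev_rx_intr_enable /
 * rte_eth_dev_rx_intr_disable; for devices whose interrupts cannot be
 * masked, the queue's clib file registration is added or deleted
 * instead, so the input node simply stops receiving events while in
 * polling mode.
 */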
static clib_error_t *
dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
			       vnet_hw_if_rx_mode mode)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_file_main_t *fm = &file_main;
  dpdk_rx_queue_t *rxq;
  clib_file_t *f;
  int rv = 0;

  if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
    return clib_error_return (0, "unsupported op (is the interface up?)");

  if (mode == VNET_HW_IF_RX_MODE_POLLING &&
      !(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
    }
  else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
  else
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_ADD);
    }

  if (rv)
    return clib_error_return (0, "dpdk_interface_rx_mode_change err %d", rv);

  return 0;
}
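/*
 * Device class registration: ties the TX function and the callbacks
 * above into vnet's device model under the name "dpdk".
 */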
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .mac_addr_add_del_function = dpdk_add_del_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
  .set_rss_queues_function = dpdk_interface_set_rss_queues,
  .rx_mode_change_function = dpdk_interface_rx_mode_change,
};
#define UP_DOWN_FLAG_EVENT 1
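/*
 * Process node used to apply interface admin up/down from a safe
 * context: events carry (sw_if_index, flags) pairs which are replayed
 * through vnet_sw_interface_set_flags(), with
 * admin_up_down_in_progress set around the call.
 */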
static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  if (vec_len (event_data) == 2)
	    {
	      sw_if_index = event_data[0];
	      flags = event_data[1];
	      error = vnet_sw_interface_set_flags (vnet_get_main (),
						   sw_if_index, flags);
	      clib_error_report (error);
	    }
	  break;
	}

      vec_reset_length (event_data);

      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,	/* 2^17 = 128KB */
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */