2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vnet/vnet.h>
16 #include <vppinfra/vec.h>
17 #include <vppinfra/error.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/xxhash.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <dpdk/device/dpdk.h>
23 #include <vnet/classify/vnet_classify.h>
24 #include <vnet/mpls/packet.h>
25 #include <vnet/handoff.h>
26 #include <vnet/devices/devices.h>
27 #include <vnet/feature/feature.h>
29 #include <dpdk/device/dpdk_priv.h>
/* Per-counter error strings for the dpdk-input node (indexed by the
   DPDK_ERROR_* enum).
   NOTE(review): the initializer body and its closing brace are not
   visible in this chunk of the file — confirm against the full source. */
31 #ifndef CLIB_MARCH_VARIANT
32 static char *dpdk_error_strings[] = {
/* dpdk_rx_next () relies on ip4-input immediately following
   ip4-input-no-checksum so it can pick between them branchlessly. */
39 STATIC_ASSERT (VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1 ==
40 VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
41 "IP4_INPUT must follow IP4_NCS_INPUT");
45 DPDK_RX_F_CKSUM_GOOD = 7,
46 DPDK_RX_F_CKSUM_BAD = 4,
50 /* currently we are just copying bit positions from DPDK, but that
51 might change in future, in case we start to be interested in something
52 stored in upper bytes. Currently we store only the lower byte for perf reasons */
/* Keep our local DPDK_RX_F_* bit positions in sync with DPDK's PKT_RX_*
   mbuf flags; dpdk_ol_flags_extract () copies only the low byte of
   ol_flags, so every flag we care about must fit in that byte. */
53 STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_GOOD == PKT_RX_IP_CKSUM_GOOD, "");
54 STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_BAD == PKT_RX_IP_CKSUM_BAD, "");
55 STATIC_ASSERT (1 << DPDK_RX_F_FDIR == PKT_RX_FDIR, "");
56 STATIC_ASSERT ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
57 256, "dpdk flags not in lower byte, fix needed");
/* Map a packet's ethertype (network byte order) plus its extracted rx
   offload flags to a device-input next index.  IP4 packets whose hardware
   checksum is good are dispatched to ip4-input-no-checksum
   (== IP4_INPUT - 1, enforced by a STATIC_ASSERT earlier in this file);
   IP6 and MPLS dispatch directly; everything else goes to ethernet-input.
   NOTE(review): the return-type line of this definition is not visible in
   this chunk. */
60 dpdk_rx_next (vlib_node_runtime_t * node, u16 etype, u8 flags)
62 if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
64 /* keep it branchless: subtracting the checksum-good bit selects
   ip4-input-no-checksum instead of ip4-input */
65 u32 is_good = (flags >> DPDK_RX_F_CKSUM_GOOD) & 1;
66 return VNET_DEVICE_INPUT_NEXT_IP4_INPUT - is_good;
68 else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
69 return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
70 else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
71 return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
73 return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
/* For a multi-segment packet, link the vlib buffers behind the 2nd..nth
   mbuf segments onto head buffer b as a VLIB buffer chain.  Returns the
   accumulated length of those extra segments, which is also stored in
   b->total_length_not_including_first_buffer.
   NOTE(review): several interior lines (nb_seg / b_chain loop setup and
   brace lines) are not visible in this chunk; comments annotate only the
   visible statements. */
76 static_always_inline uword
77 dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
78 struct rte_mbuf * mb, vlib_buffer_free_list_t * fl)
81 struct rte_mbuf *mb_seg = 0;
82 vlib_buffer_t *b_seg, *b_chain = 0;
89 b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
90 b->total_length_not_including_first_buffer = 0;
92 while (nb_seg < mb->nb_segs)
96 b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
97 vlib_buffer_init_for_free_list (b_seg, fl);
99 ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
/* NOTE(review): this ASSERT contradicts the comment just below, which
   says current_data may legitimately be non-zero (e.g. virtio) —
   confirm whether the ASSERT should be removed. */
100 ASSERT (b_seg->current_data == 0);
103 * The driver (e.g. virtio) may not put the packet data at the start
104 * of the segment, so don't assume b_seg->current_data == 0 is correct.
106 b_seg->current_data =
107 (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
109 b_seg->current_length = mb_seg->data_len;
110 b->total_length_not_including_first_buffer += mb_seg->data_len;
/* append this segment to the chain built so far */
112 b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
113 b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
116 mb_seg = mb_seg->next;
119 return b->total_length_not_including_first_buffer;
/* Prefetch the first cache line of four rte_mbuf headers (mb[0..3]). */
122 static_always_inline void
123 dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
125 CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
126 CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
127 CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
128 CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
/* Prefetch the vlib_buffer_t headers associated with four mbufs
   (vlib_buffer_from_rte_mbuf translates mbuf -> buffer address).
   NOTE(review): the declaration line of b (vlib_buffer_t *) is not
   visible in this chunk. */
131 static_always_inline void
132 dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
135 b = vlib_buffer_from_rte_mbuf (mb[0]);
136 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
137 b = vlib_buffer_from_rte_mbuf (mb[1]);
138 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
139 b = vlib_buffer_from_rte_mbuf (mb[2]);
140 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
141 b = vlib_buffer_from_rte_mbuf (mb[3]);
142 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
/* Prefetch the first cache line of the packet data area (b->data) of
   four buffers — used before the ethertype is read from the payload.
   NOTE(review): the declaration line of b is not visible in this chunk. */
145 static_always_inline void
146 dpdk_prefetch_buffer_data_x4 (struct rte_mbuf *mb[])
149 b = vlib_buffer_from_rte_mbuf (mb[0]);
150 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
151 b = vlib_buffer_from_rte_mbuf (mb[1]);
152 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
153 b = vlib_buffer_from_rte_mbuf (mb[2]);
154 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
155 b = vlib_buffer_from_rte_mbuf (mb[3]);
156 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
159 /** \brief Main DPDK input node
162 This is the main DPDK input node: across each assigned interface,
163 call rte_eth_rx_burst(...) or similar to obtain a vector of
164 packets to process. Derive @c vlib_buffer_t metadata from
165 <code>struct rte_mbuf</code> metadata.
166 Depending on the resulting metadata: adjust <code>b->current_data,
167 b->current_length </code> and dispatch directly to
168 ip4-input-no-checksum, or ip6-input. Trace the packet if required.
170 @param vm vlib_main_t corresponding to the current thread
171 @param node vlib_node_runtime_t
172 @param f vlib_frame_t input-node, not used.
174 @par Graph mechanics: buffer metadata, next index usage
177 - <code>struct rte_mbuf mb->ol_flags</code>
178 - PKT_RX_IP_CKSUM_BAD
181 - <code>b->error</code> if the packet is to be dropped immediately
182 - <code>b->current_data, b->current_length</code>
183 - adjusted as needed to skip the L2 header in direct-dispatch cases
184 - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
185 - rx interface sw_if_index
186 - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
187 - required by ipX-lookup
188 - <code>b->flags</code>
189 - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
192 - Static arcs to: error-drop, ethernet-input,
193 ip4-input-no-checksum, ip6-input, mpls-input
194 - per-interface redirection, controlled by
195 <code>xd->per_interface_next_index</code>
/* Copy the low byte of each mbuf's ol_flags into flags[] and OR all of
   those bytes together; callers use the OR result to cheaply test
   whether ANY packet in the burst carries an interesting flag.
   NOTE(review): the declaration of i and the return statement are not
   visible in this chunk. */
198 static_always_inline u8
199 dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
203 for (i = 0; i < count; i++)
205 /* all flags we are interested in are in lower 8 bits — see the
   STATIC_ASSERTs near the top of this file */
207 flags[i] = (u8) mb[i]->ol_flags;
/* Turn a burst of received mbufs (ptd->mbufs, n_rx_packets of them) into
   initialized vlib buffers: copy the per-thread buffer template into
   each buffer, set current_data / current_length from the mbuf, stash
   the byte offset of the ethertype into ptd->next[] (read later by
   dpdk_set_next_from_etype), extract ol_flags low bytes into ptd->flags
   and OR them all into *or_flagsp.  When maybe_multiseg is set, chained
   mbuf segments are linked via dpdk_process_subseq_segs.  Returns the
   total byte count of the burst.
   NOTE(review): loop headers, several declarations (b, next, flags, off,
   n_bytes, nb_seg) and brace lines are not visible in this chunk;
   comments annotate only the visible statements. */
213 static_always_inline uword
214 dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
215 uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
217 u32 n_left = n_rx_packets;
219 vlib_buffer_free_list_t *fl;
220 struct rte_mbuf **mb = ptd->mbufs;
223 u8 *flags, or_flags = 0;
226 fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
/* quad-loop body: process four packets per iteration, prefetching the
   next four mbufs/buffers ahead of time */
234 CLIB_PREFETCH (mb + 8, CLIB_CACHE_LINE_BYTES, LOAD);
236 dpdk_prefetch_buffer_x4 (mb + 4);
238 b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
239 b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
240 b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
241 b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
/* initialize all four buffers from the shared template in one go */
243 clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);
245 dpdk_prefetch_mbuf_x4 (mb + 4);
247 or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
250 /* we temporarily store the relative offset of the ethertype into next[x]
251 so we can prefetch and get it faster later */
253 off = mb[0]->data_off;
254 next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
255 off -= RTE_PKTMBUF_HEADROOM;
256 vnet_buffer (b[0])->l2_hdr_offset = off;
257 b[0]->current_data = off;
259 off = mb[1]->data_off;
260 next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
261 off -= RTE_PKTMBUF_HEADROOM;
262 vnet_buffer (b[1])->l2_hdr_offset = off;
263 b[1]->current_data = off;
265 off = mb[2]->data_off;
266 next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
267 off -= RTE_PKTMBUF_HEADROOM;
268 vnet_buffer (b[2])->l2_hdr_offset = off;
269 b[2]->current_data = off;
271 off = mb[3]->data_off;
272 next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
273 off -= RTE_PKTMBUF_HEADROOM;
274 vnet_buffer (b[3])->l2_hdr_offset = off;
275 b[3]->current_data = off;
277 b[0]->current_length = mb[0]->data_len;
278 b[1]->current_length = mb[1]->data_len;
279 b[2]->current_length = mb[2]->data_len;
280 b[3]->current_length = mb[3]->data_len;
282 n_bytes += mb[0]->data_len;
283 n_bytes += mb[1]->data_len;
284 n_bytes += mb[2]->data_len;
285 n_bytes += mb[3]->data_len;
/* multi-segment packets: chain the remaining mbuf segments
   (presumably guarded by maybe_multiseg on a line not visible here) */
289 n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
290 n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
291 n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
292 n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
295 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
296 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
297 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
298 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
/* remainder loop: same steps, one packet at a time */
308 b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
309 clib_memcpy (b[0], &ptd->buffer_template, 64);
310 or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
313 off = mb[0]->data_off;
314 next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
315 off -= RTE_PKTMBUF_HEADROOM;
316 vnet_buffer (b[0])->l2_hdr_offset = off;
317 b[0]->current_data = off;
318 b[0]->current_length = mb[0]->data_len;
319 n_bytes += mb[0]->data_len;
321 n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
322 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
/* publish the OR of all per-packet flag bytes to the caller */
330 *or_flagsp = or_flags;
/* For each packet in the burst: read the ethertype at the offset that
   dpdk_process_rx_burst () stashed in ptd->next[], translate it (plus
   the packet's rx flags) to a next-node index via dpdk_rx_next (), then
   advance current_data / shrink current_length by that next node's
   expected L2 advance (device_input_next_node_advance), e.g. to skip the
   ethernet header when dispatching straight to ip4/ip6/mpls input.
   NOTE(review): the declarations of b, etype, adv and the loop headers
   are not visible in this chunk. */
334 static_always_inline void
335 dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
336 dpdk_per_thread_data_t * ptd, uword n_rx_packets)
341 struct rte_mbuf **mb = ptd->mbufs;
342 u8 *flags = ptd->flags;
343 u16 *next = ptd->next;
344 u32 n_left = n_rx_packets;
/* quad-loop: four packets per iteration with look-ahead prefetch */
348 dpdk_prefetch_buffer_data_x4 (mb + 8);
349 dpdk_prefetch_buffer_x4 (mb + 8);
351 b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
352 b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
353 b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
354 b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
/* next[x] currently holds the ethertype offset relative to the mbuf;
   the vlib_buffer_t header sits sizeof (vlib_buffer_t) past the mbuf */
355 etype[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
356 etype[1] = *(u16 *) ((u8 *) mb[1] + next[1] + sizeof (vlib_buffer_t));
357 etype[2] = *(u16 *) ((u8 *) mb[2] + next[2] + sizeof (vlib_buffer_t));
358 etype[3] = *(u16 *) ((u8 *) mb[3] + next[3] + sizeof (vlib_buffer_t));
359 next[0] = dpdk_rx_next (node, etype[0], flags[0]);
360 next[1] = dpdk_rx_next (node, etype[1], flags[1]);
361 next[2] = dpdk_rx_next (node, etype[2], flags[2]);
362 next[3] = dpdk_rx_next (node, etype[3], flags[3]);
363 adv[0] = device_input_next_node_advance[next[0]];
364 adv[1] = device_input_next_node_advance[next[1]];
365 adv[2] = device_input_next_node_advance[next[2]];
366 adv[3] = device_input_next_node_advance[next[3]];
367 b[0]->current_data += adv[0];
368 b[1]->current_data += adv[1];
369 b[2]->current_data += adv[2];
370 b[3]->current_data += adv[3];
371 b[0]->current_length -= adv[0];
372 b[1]->current_length -= adv[1];
373 b[2]->current_length -= adv[2];
374 b[3]->current_length -= adv[3];
/* remainder loop: one packet at a time */
385 b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
386 next[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
387 next[0] = dpdk_rx_next (node, next[0], flags[0]);
388 adv[0] = device_input_next_node_advance[next[0]];
389 b[0]->current_data += adv[0];
390 b[0]->current_length -= adv[0];
/* Apply rx flow-offload (FDIR) results: for every packet whose mbuf was
   marked with PKT_RX_FDIR, look up the flow entry indexed by
   mb->hash.fdir.hi in xd->flow_lookup_entries and, per entry fields,
   optionally override the packet's next index, set b->flow_id and apply
   a buffer advance.  ~0 values act as "not set" sentinels.
   NOTE(review): the n_rx_packets parameter line, b0/n declarations and
   brace lines are not visible in this chunk. */
400 static_always_inline void
401 dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
405 dpdk_flow_lookup_entry_t *fle;
408 /* TODO prefetch and quad-loop */
409 for (n = 0; n < n_rx_packets; n++)
411 if ((ptd->flags[n] & (1 << DPDK_RX_F_FDIR)) == 0)
414 fle = pool_elt_at_index (xd->flow_lookup_entries,
415 ptd->mbufs[n]->hash.fdir.hi);
417 if (fle->next_index != (u16) ~ 0)
418 ptd->next[n] = fle->next_index;
420 if (fle->flow_id != ~0)
422 b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
423 b0->flow_id = fle->flow_id;
426 if (fle->buffer_advance != ~0)
428 b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
429 vlib_buffer_advance (b0, fle->buffer_advance);
/* Poll one (device, queue) pair: drain up to DPDK_RX_BURST_SZ mbufs from
   the PMD, convert them into vlib buffers, choose next nodes (feature
   arc, per-interface redirect, ethertype, flow offload, bad ip4
   checksum -> drop), enqueue them, then handle packet tracing, rx pcap
   capture and interface counters.  Returns the number of packets
   received on this queue.
   NOTE(review): many declarations (n, or_flags, known_next, n_trace,
   n_left, buffers, bi0, b0, next, data_start, temp_advance), brace lines
   and loop headers are not visible in this chunk. */
434 static_always_inline u32
435 dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
436 vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
438 uword n_rx_packets = 0, n_rx_bytes;
441 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
442 struct rte_mbuf **mb;
449 dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
451 vlib_buffer_t *bt = &ptd->buffer_template;
/* nothing to do for admin-down interfaces */
453 if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
456 /* get up to DPDK_RX_BURST_SZ buffers from PMD */
457 while (n_rx_packets < DPDK_RX_BURST_SZ)
459 n = rte_eth_rx_burst (xd->port_id, queue_id,
460 ptd->mbufs + n_rx_packets,
461 DPDK_RX_BURST_SZ - n_rx_packets);
468 if (n_rx_packets == 0)
471 /* Update buffer template */
472 vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
473 bt->error = node->errors[DPDK_ERROR_NONE];
474 /* as DPDK is allocating empty buffers from the mempool provided before
475 interface start for each queue, it is safe to store this in the template */
476 bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
478 /* honor per-interface next-node redirection, if configured */
479 if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
482 next_index = xd->per_interface_next_index;
485 /* as all packets belong to the same interface, the feature arc lookup
486 can be done once and the result stored in the buffer template */
487 if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
489 vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
493 if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
494 n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
496 n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
/* NOTE(review): known_next is declared/set on lines not visible in this
   chunk — presumably true when next_index was forced above; confirm. */
498 if (PREDICT_FALSE (known_next))
500 for (n = 0; n < n_rx_packets; n++)
501 ptd->next[n] = next_index;
503 vnet_buffer (bt)->feature_arc_index = 0;
504 bt->current_config_index = 0;
507 dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);
509 /* flow offload - process if rx flow offload is enabled and at least one
packet in the burst was marked by FDIR */
511 if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
512 (or_flags & (1 << DPDK_RX_F_FDIR))))
513 dpdk_process_flow_offload (xd, ptd, n_rx_packets);
515 /* is at least one packet marked as ip4 checksum bad? */
516 if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
517 for (n = 0; n < n_rx_packets; n++)
519 if ((ptd->flags[n] & (1 << DPDK_RX_F_CKSUM_BAD)) == 0)
521 if (ptd->next[n] != VNET_DEVICE_INPUT_NEXT_IP4_INPUT)
/* bad ip4 checksum headed to ip4-input: record error and drop */
524 b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
525 b0->error = node->errors[DPDK_ERROR_IP_CHECKSUM_ERROR];
526 ptd->next[n] = VNET_DEVICE_INPUT_NEXT_DROP;
529 /* enqueue buffers to the next node */
530 vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, ptd->buffers,
532 sizeof (struct rte_mbuf));
534 vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
537 /* packet trace if enabled */
538 if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
540 n_left = n_rx_packets;
541 buffers = ptd->buffers;
544 while (n_trace && n_left)
546 b0 = vlib_get_buffer (vm, buffers[0]);
547 vlib_trace_buffer (vm, node, next[0], b0, /* follow_chain */ 0);
549 dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
550 t0->queue_index = queue_id;
551 t0->device_index = xd->device_index;
552 t0->buffer_index = vlib_get_buffer_index (vm, b0);
/* snapshot mbuf, buffer header and leading packet bytes into the trace */
554 clib_memcpy (&t0->mb, mb[0], sizeof t0->mb);
555 clib_memcpy (&t0->buffer, b0, sizeof b0[0] - sizeof b0->pre_data);
556 clib_memcpy (t0->buffer.pre_data, b0->data,
557 sizeof t0->buffer.pre_data);
558 clib_memcpy (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
566 vlib_set_trace_count (vm, node, n_trace);
569 /* rx pcap capture if enabled */
570 if (PREDICT_FALSE (dm->pcap[VLIB_RX].pcap_enable))
573 n_left = n_rx_packets;
574 buffers = ptd->buffers;
578 b0 = vlib_get_buffer (vm, bi0);
/* capture all interfaces (sw_if_index == 0) or the selected one only */
581 if (dm->pcap[VLIB_RX].pcap_sw_if_index == 0 ||
582 dm->pcap[VLIB_RX].pcap_sw_if_index
583 == vnet_buffer (b0)->sw_if_index[VLIB_RX])
590 * Note: current_data will have advanced
591 * when we skip ethernet input.
592 * Temporarily back up to the original DMA
593 * target, so we capture a valid ethernet frame
595 mb = rte_mbuf_from_vlib_buffer (b0);
597 /* Figure out the original data_start */
598 data_start = (mb->buf_addr + mb->data_off) - (void *) b0->data;
599 /* Back up that far */
600 temp_advance = b0->current_data - data_start;
601 vlib_buffer_advance (b0, -temp_advance);
602 /* Trace the packet */
603 pcap_add_buffer (&dm->pcap[VLIB_RX].pcap_main, vm, bi0, 512);
604 /* and advance again */
605 vlib_buffer_advance (b0, temp_advance);
/* update interface rx packet/byte counters for this thread */
611 vlib_increment_combined_counter
612 (vnet_get_main ()->interface_main.combined_sw_if_counters
613 + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
614 n_rx_packets, n_rx_bytes);
616 vnet_device_increment_rx_packets (thread_index, n_rx_packets);
/** dpdk-input node dispatch function: poll every (device, queue) pair
    assigned to this thread via dpdk_device_input () and accumulate the
    total rx packet count.  Interfaces that are slaves of a bonded
    interface are skipped here — they are polled through the bond.
    NOTE(review): the xd declaration, closing lines and the return of
    n_rx_packets are not visible in this chunk. */
621 VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
624 dpdk_main_t *dm = &dpdk_main;
626 uword n_rx_packets = 0;
627 vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
628 vnet_device_and_queue_t *dq;
629 u32 thread_index = node->thread_index;
632 * Poll all devices on this cpu for input/interrupts.
635 foreach_device_and_queue (dq, rt->devices_and_queues)
637 xd = vec_elt_at_index(dm->devices, dq->dev_instance);
638 if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
639 continue; /* Do not poll slave to a bonded interface */
640 n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
/* Registration of the dpdk-input node: a polling input node, sibling of
   device-input, left disabled until DPDK hardware is detected/attached.
   NOTE(review): the trailing fields and closing brace of this
   registration are not visible in this chunk. */
647 #ifndef CLIB_MARCH_VARIANT
649 VLIB_REGISTER_NODE (dpdk_input_node) = {
650 .type = VLIB_NODE_TYPE_INPUT,
651 .name = "dpdk-input",
652 .sibling_of = "device-input",
654 /* Will be enabled if/when hardware is detected. */
655 .state = VLIB_NODE_STATE_DISABLED,
657 .format_buffer = format_ethernet_header_with_length,
658 .format_trace = format_dpdk_rx_trace,
660 .n_errors = DPDK_N_ERROR,
661 .error_strings = dpdk_error_strings,
667 * fd.io coding-style-patch-verification: ON
670 * eval: (c-set-style "gnu")