/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>

static char *dpdk_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_error
#undef _
};

STATIC_ASSERT (VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1 ==
	       VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
	       "IP4_INPUT must follow IP4_NCS_INPUT");

enum
{
  DPDK_RX_F_CKSUM_GOOD = 7,
  DPDK_RX_F_CKSUM_BAD = 4,
  DPDK_RX_F_FDIR = 2,
};

/* Currently we are just copying bit positions from DPDK, but that might
   change in the future should we become interested in something stored in
   the upper bytes. For performance reasons we store only the lower byte. */
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_GOOD == PKT_RX_IP_CKSUM_GOOD, "");
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_BAD == PKT_RX_IP_CKSUM_BAD, "");
STATIC_ASSERT (1 << DPDK_RX_F_FDIR == PKT_RX_FDIR, "");
STATIC_ASSERT ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
	       256, "dpdk flags not in lower byte, fix needed");
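
/* Worked example: for a hardware-verified IPv4 packet the PMD sets
   PKT_RX_IP_CKSUM_GOOD (bit 7) in mb->ol_flags. The low byte is copied
   verbatim into our per-packet flags, so (flags >> DPDK_RX_F_CKSUM_GOOD) & 1
   yields 1, and dpdk_rx_next () below subtracts it from
   VNET_DEVICE_INPUT_NEXT_IP4_INPUT to land on ip4-input-no-checksum; the
   adjacency of those two next indices is guaranteed by the first
   STATIC_ASSERT above. */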
static_always_inline u32
dpdk_rx_next (vlib_node_runtime_t * node, u16 etype, u8 flags)
{
  if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      /* keep it branchless */
      u32 is_good = (flags >> DPDK_RX_F_CKSUM_GOOD) & 1;
      return VNET_DEVICE_INPUT_NEXT_IP4_INPUT - is_good;
    }
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
  else
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
}
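
/* Attach the 2nd..nth mbuf segments of a multi-segment (e.g. jumbo) packet
   to the head vlib buffer as a VLIB_BUFFER_NEXT_PRESENT chain. Returns the
   number of bytes carried by those extra segments; the head segment itself
   is accounted for by the caller. */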
static_always_inline uword
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
			  struct rte_mbuf * mb, vlib_buffer_free_list_t * fl)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = 0;
  vlib_buffer_t *b_seg, *b_chain = 0;
  mb_seg = mb->next;
  b_chain = b;

  if (mb->nb_segs < 2)
    return 0;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  while (nb_seg < mb->nb_segs)
    {
      ASSERT (mb_seg != 0);

      b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
      vlib_buffer_init_for_free_list (b_seg, fl);

      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
      ASSERT (b_seg->current_data == 0);

      /*
       * The driver (e.g. virtio) may not put the packet data at the start
       * of the segment, so don't assume b_seg->current_data == 0 is correct.
       */
      b_seg->current_data =
	(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;

      b_seg->current_length = mb_seg->data_len;
      b->total_length_not_including_first_buffer += mb_seg->data_len;

      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }
  return b->total_length_not_including_first_buffer;
}
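
/* x4 prefetch helpers for the quad loops below: the rte_mbuf, the
   vlib_buffer_t metadata (which lives at a fixed offset from its mbuf) and
   the packet data sit on different cache lines, so each gets its own
   prefetch pass. */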
static_always_inline void
dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
{
  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_data_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
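
/* dpdk_prefetch_buffer_data_x4 is only needed on the ethertype-parsing
   path, where dpdk_set_next_from_etype () must read packet data; the other
   two helpers cover the metadata cache lines touched by
   dpdk_process_rx_burst (). */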
/** \brief Main DPDK input node

    @node dpdk-input

    This is the main DPDK input node: across each assigned interface,
    call rte_eth_rx_burst(...) or similar to obtain a vector of
    packets to process. Derive @c vlib_buffer_t metadata from
    <code>struct rte_mbuf</code> metadata.
    Depending on the resulting metadata: adjust <code>b->current_data,
    b->current_length</code> and dispatch directly to
    ip4-input-no-checksum, or ip6-input. Trace the packet if required.

    @param vm   vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param f    vlib_frame_t input-node, not used.

    @par Graph mechanics: buffer metadata, next index usage

    @em Uses:
    - <code>struct rte_mbuf mb->ol_flags</code>
        - PKT_RX_IP_CKSUM_BAD

    @em Sets:
    - <code>b->error</code> if the packet is to be dropped immediately
    - <code>b->current_data, b->current_length</code>
        - adjusted as needed to skip the L2 header in direct-dispatch cases
    - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
        - rx interface sw_if_index
    - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
        - required by ipX-lookup
    - <code>b->flags</code>
        - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.

    <em>Next Nodes:</em>
    - Static arcs to: error-drop, ethernet-input,
      ip4-input-no-checksum, ip6-input, mpls-input
    - per-interface redirection, controlled by
      <code>xd->per_interface_next_index</code>
*/
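
/* Copy the low byte of ol_flags for `count' mbufs into flags[] and return
   the OR of all of them, so callers can test a single byte to skip
   per-packet checksum/FDIR handling when nothing in the burst set the
   relevant bit. */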
static_always_inline u8
dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
{
  u8 rv = 0;
  int i;
  for (i = 0; i < count; i++)
    {
      /* all flags we are interested in are in lower 8 bits but
         that might change */
      flags[i] = (u8) mb[i]->ol_flags;
      rv |= flags[i];
    }
  return rv;
}
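
/* Turn a burst of mbufs into initialized vlib buffers: copy the 64-byte
   buffer template, record l2_hdr_offset / current_data / current_length,
   stash the ethertype offset into next[x] for later, and optionally walk
   multi-segment chains. Works on 4 packets per iteration while at least 8
   remain, so the prefetches issued for mb[4..7] stay ahead of use; returns
   the total byte count of the burst. */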
static_always_inline uword
dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
		       uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
{
  u32 n_left = n_rx_packets;
  vlib_buffer_t *b[4];
  vlib_buffer_free_list_t *fl;
  struct rte_mbuf **mb = ptd->mbufs;
  uword n_bytes = 0;
  i16 off;
  u8 *flags, or_flags = 0;
  u16 *next;

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  flags = ptd->flags;
  next = ptd->next;

  while (n_left >= 8)
    {
      CLIB_PREFETCH (mb + 8, CLIB_CACHE_LINE_BYTES, LOAD);

      dpdk_prefetch_buffer_x4 (mb + 4);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);

      dpdk_prefetch_mbuf_x4 (mb + 4);

      or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
      flags += 4;

      /* we temporarily store the relative offset of the ethertype into
         next[x] so we can prefetch it and read it faster later */

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;

      off = mb[1]->data_off;
      next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[1])->l2_hdr_offset = off;
      b[1]->current_data = off;

      off = mb[2]->data_off;
      next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[2])->l2_hdr_offset = off;
      b[2]->current_data = off;

      off = mb[3]->data_off;
      next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[3])->l2_hdr_offset = off;
      b[3]->current_data = off;

      b[0]->current_length = mb[0]->data_len;
      b[1]->current_length = mb[1]->data_len;
      b[2]->current_length = mb[2]->data_len;
      b[3]->current_length = mb[3]->data_len;

      n_bytes += mb[0]->data_len;
      n_bytes += mb[1]->data_len;
      n_bytes += mb[2]->data_len;
      n_bytes += mb[3]->data_len;

      if (maybe_multiseg)
	{
	  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      mb += 4;
      n_left -= 4;
      next += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      clib_memcpy (b[0], &ptd->buffer_template, 64);
      or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
      flags += 1;

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;
      b[0]->current_length = mb[0]->data_len;
      n_bytes += mb[0]->data_len;
      if (maybe_multiseg)
	n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      mb += 1;
      n_left -= 1;
      next += 1;
    }

  *or_flagsp = or_flags;
  return n_bytes;
}
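
/* Resolve the next node per packet: read the ethertype at the offset
   stashed in next[x] by dpdk_process_rx_burst (), map it through
   dpdk_rx_next (), then use device_input_next_node_advance[] to skip the L2
   header on direct-dispatch arcs (e.g. 14 bytes before ip4-input) by
   adjusting current_data / current_length. */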
static_always_inline void
dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
			  dpdk_per_thread_data_t * ptd, uword n_rx_packets)
{
  vlib_buffer_t *b[4];
  i16 adv[4];
  u16 etype[4];
  struct rte_mbuf **mb = ptd->mbufs;
  u8 *flags = ptd->flags;
  u16 *next = ptd->next;
  u32 n_left = n_rx_packets;

  while (n_left >= 12)
    {
      dpdk_prefetch_buffer_data_x4 (mb + 8);
      dpdk_prefetch_buffer_x4 (mb + 8);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
      etype[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      etype[1] = *(u16 *) ((u8 *) mb[1] + next[1] + sizeof (vlib_buffer_t));
      etype[2] = *(u16 *) ((u8 *) mb[2] + next[2] + sizeof (vlib_buffer_t));
      etype[3] = *(u16 *) ((u8 *) mb[3] + next[3] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, etype[0], flags[0]);
      next[1] = dpdk_rx_next (node, etype[1], flags[1]);
      next[2] = dpdk_rx_next (node, etype[2], flags[2]);
      next[3] = dpdk_rx_next (node, etype[3], flags[3]);
      adv[0] = device_input_next_node_advance[next[0]];
      adv[1] = device_input_next_node_advance[next[1]];
      adv[2] = device_input_next_node_advance[next[2]];
      adv[3] = device_input_next_node_advance[next[3]];
      b[0]->current_data += adv[0];
      b[1]->current_data += adv[1];
      b[2]->current_data += adv[2];
      b[3]->current_data += adv[3];
      b[0]->current_length -= adv[0];
      b[1]->current_length -= adv[1];
      b[2]->current_length -= adv[2];
      b[3]->current_length -= adv[3];

      /* next */
      mb += 4;
      n_left -= 4;
      next += 4;
      flags += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      next[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, next[0], flags[0]);
      adv[0] = device_input_next_node_advance[next[0]];
      b[0]->current_data += adv[0];
      b[0]->current_length -= adv[0];

      /* next */
      mb += 1;
      n_left -= 1;
      next += 1;
      flags += 1;
    }
}
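
/* If a received packet matched a hardware flow rule, the PMD sets
   PKT_RX_FDIR and leaves the rule's mark in mb->hash.fdir.hi; use it to
   look up our flow entry and optionally override the next node, tag the
   buffer with a flow_id and/or advance the buffer. */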
static_always_inline void
dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
			   uword n_rx_packets)
{
  uword n;
  dpdk_flow_lookup_entry_t *fle;
  vlib_buffer_t *b0;

  /* TODO prefetch and quad-loop */
  for (n = 0; n < n_rx_packets; n++)
    {
      if ((ptd->flags[n] & (1 << DPDK_RX_F_FDIR)) == 0)
	continue;

      fle = pool_elt_at_index (xd->flow_lookup_entries,
			       ptd->mbufs[n]->hash.fdir.hi);

      if (fle->next_index != (u16) ~ 0)
	ptd->next[n] = fle->next_index;

      if (fle->flow_id != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  b0->flow_id = fle->flow_id;
	}

      if (fle->buffer_advance != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  vlib_buffer_advance (b0, fle->buffer_advance);
	}
    }
}
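
/* Per-device, per-queue receive: drain up to DPDK_RX_BURST_SZ mbufs from
   the PMD, build buffer metadata, resolve next nodes, apply flow offload
   and checksum policy, enqueue, and handle tracing / pcap / counters.
   Returns the number of packets received. */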
static_always_inline u32
dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
		   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
{
  uword n_rx_packets = 0, n_rx_bytes;
  u32 n_left, n_trace;
  u32 *buffers;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  struct rte_mbuf **mb;
  vlib_buffer_t *b0;
  int known_next = 0;
  u16 *next;
  u8 or_flags;
  u32 n;

  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;

  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
    return 0;

  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
  while (n_rx_packets < DPDK_RX_BURST_SZ)
    {
      n = rte_eth_rx_burst (xd->port_id, queue_id,
			    ptd->mbufs + n_rx_packets,
			    DPDK_RX_BURST_SZ - n_rx_packets);
      n_rx_packets += n;

      if (n < 32)
	break;
    }

  if (n_rx_packets == 0)
    return 0;

  /* Update buffer template */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
  bt->error = node->errors[DPDK_ERROR_NONE];
  /* as DPDK allocates empty buffers for each queue from a mempool provided
     before interface start, it is safe to store this in the template */
  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];

  /* is this interface redirected to a specific next node? */
  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
    {
      known_next = 1;
      next_index = xd->per_interface_next_index;
    }

  /* as all packets belong to the same interface, the feature arc lookup
     can be done once and the result stored in the buffer template */
  if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
    {
      vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
      known_next = 1;
    }

  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
  else
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);

  if (PREDICT_FALSE (known_next))
    {
      for (n = 0; n < n_rx_packets; n++)
	ptd->next[n] = next_index;

      vnet_buffer (bt)->feature_arc_index = 0;
      bt->current_config_index = 0;
    }
  else
    dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);

  /* flow offload - process if rx flow offload enabled and at least one
     packet is marked */
  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
		     (or_flags & (1 << DPDK_RX_F_FDIR))))
    dpdk_process_flow_offload (xd, ptd, n_rx_packets);

  /* is at least one packet marked as ip4 checksum bad? */
  if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
    for (n = 0; n < n_rx_packets; n++)
      {
	if ((ptd->flags[n] & (1 << DPDK_RX_F_CKSUM_BAD)) == 0)
	  continue;
	if (ptd->next[n] != VNET_DEVICE_INPUT_NEXT_IP4_INPUT)
	  continue;

	b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	b0->error = node->errors[DPDK_ERROR_IP_CHECKSUM_ERROR];
	ptd->next[n] = VNET_DEVICE_INPUT_NEXT_DROP;
      }
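
  /* Buffer indices are recovered from the mbuf pointers in one pass: each
     vlib_buffer_t header sits exactly sizeof (struct rte_mbuf) bytes after
     its mbuf, hence the constant offset below. */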
  /* enqueue buffers to the next node */
  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, ptd->buffers,
				       n_rx_packets,
				       sizeof (struct rte_mbuf));

  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
			       n_rx_packets);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      mb = ptd->mbufs;
      next = ptd->next;

      while (n_trace && n_left)
	{
	  b0 = vlib_get_buffer (vm, buffers[0]);
	  vlib_trace_buffer (vm, node, next[0], b0, /* follow_chain */ 0);

	  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
	  t0->queue_index = queue_id;
	  t0->device_index = xd->device_index;
	  t0->buffer_index = vlib_get_buffer_index (vm, b0);

	  clib_memcpy (&t0->mb, mb[0], sizeof t0->mb);
	  clib_memcpy (&t0->buffer, b0, sizeof b0[0] - sizeof b0->pre_data);
	  clib_memcpy (t0->buffer.pre_data, b0->data,
		       sizeof t0->buffer.pre_data);
	  clib_memcpy (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
		       sizeof t0->data);

	  n_trace--;
	  n_left--;
	  buffers++;
	  mb++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (dm->pcap[VLIB_RX].pcap_enable))
    {
      u32 bi0;
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      while (n_left)
	{
	  bi0 = buffers[0];
	  b0 = vlib_get_buffer (vm, bi0);
	  buffers++;

	  if (dm->pcap[VLIB_RX].pcap_sw_if_index == 0 ||
	      dm->pcap[VLIB_RX].pcap_sw_if_index
	      == vnet_buffer (b0)->sw_if_index[VLIB_RX])
	    {
	      struct rte_mbuf *mb;
	      i16 data_start;
	      i32 temp_advance;

	      /*
	       * Note: current_data will have advanced
	       * when we skip ethernet input.
	       * Temporarily back up to the original DMA
	       * target, so we capture a valid ethernet frame.
	       */
	      mb = rte_mbuf_from_vlib_buffer (b0);
	      /* Figure out the original data_start */
	      data_start = (mb->buf_addr + mb->data_off) - (void *) b0->data;
	      /* Back up that far */
	      temp_advance = b0->current_data - data_start;
	      vlib_buffer_advance (b0, -temp_advance);
	      /* Capture the packet */
	      pcap_add_buffer (&dm->pcap[VLIB_RX].pcap_main, vm, bi0, 512);
	      /* and advance again */
	      vlib_buffer_advance (b0, temp_advance);
	    }
	  n_left--;
	}
    }
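
  /* Counters are updated once per burst rather than per packet; the
     combined counter carries both packet and byte totals and is kept per
     thread (indexed by thread_index), so no atomics are needed here. */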
  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);

  return n_rx_packets;
}

VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword n_rx_packets = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  u32 thread_index = node->thread_index;

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  /* *INDENT-OFF* */
  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      xd = vec_elt_at_index(dm->devices, dq->dev_instance);
      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
	continue;	/* Do not poll a slave of a bonded interface */
      n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
					 dq->queue_id);
    }
  /* *INDENT-ON* */

  return n_rx_packets;
}
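
/* dpdk-input is registered as a sibling of device-input, so it shares
   device-input's next-node arcs (and the next indices used above); it
   starts out disabled and is only switched to polling once a DPDK
   device/queue is assigned to a worker thread. */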
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */