2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ioam_cache_tunnel_select_node.c
17 * This file implements anycast server selection using ioam data
18 * attached to anycast service selection.
19 * Anycast service is reachable via multiple servers reachable
21 * Works with TCP Anycast application.
22 * Cache entry is created when TCP SYN is received for anycast destination.
23 * Response TCP SYN ACKs for anycast service is compared and selected
24 * response is forwarded.
25 * The functionality is introduced via graph nodes that are hooked into
26 * vnet graph via classifier configs like below:
28 * Enable anycast service selection:
29 * set ioam ip6 sr-tunnel-select oneway
31 * Enable following classifier on the anycast service client facing interface
32 * e.g. anycast service is db06::06 then:
33 * classify session acl-hit-next ip6-node ip6-add-syn-hop-by-hop table-index 0 match l3
34 * ip6 dst db06::06 ioam-encap anycast
36 * Enable following classifier on the interfaces facing the server of anycast service:
37 * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3
38 * ip6 src db06::06 ioam-decap anycast
41 #include <vlib/vlib.h>
42 #include <vnet/vnet.h>
43 #include <vnet/pg/pg.h>
44 #include <vppinfra/error.h>
45 #include <vnet/ip/ip.h>
46 #include <vnet/srv6/sr.h>
47 #include <ioam/ip6/ioam_cache.h>
48 #include <vnet/ip/ip6_hop_by_hop.h>
49 #include <vnet/ip/ip6_hop_by_hop_packet.h>
57 /* packet trace format function */
/* Packet-trace formatter for the tunnel-select cache node: prints the
 * flow label and chosen next-node index recorded in cache_ts_trace_t. */
59 format_cache_ts_trace (u8 * s, va_list * args)
61 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63 cache_ts_trace_t *t = va_arg (*args, cache_ts_trace_t *);
65 s = format (s, "CACHE: flow_label %d, next index %d",
66 t->flow_label, t->next_index);
/* Error counters for the tunnel-select cache node; each _() entry below
 * expands to both an enum symbol and its counter string. */
70 #define foreach_cache_ts_error \
71 _(RECORDED, "ip6 iOAM headers cached")
75 #define _(sym,str) CACHE_TS_ERROR_##sym,
76 foreach_cache_ts_error
81 static char *cache_ts_error_strings[] = {
82 #define _(sym,string) string,
83 foreach_cache_ts_error
/* Next-node dispositions for the cache node: pop the hop-by-hop header
 * and forward the selected response, or drop the losing/duplicate one. */
89 IOAM_CACHE_TS_NEXT_POP_HBYH,
90 IOAM_CACHE_TS_ERROR_NEXT_DROP,
/* Node function for "ip6-ioam-tunnel-select".
 * Examines TCP responses (SYN-ACK / RST) to an anycast service, compares
 * the iOAM hop-by-hop data of each SYN-ACK against the cached best so far
 * (ioam_cache_ts_lookup / ip6_ioam_analyse_compare_path_delay), keeps the
 * better response in the cache and drops the rest.  The winning response
 * is eventually forwarded via the pop-hop-by-hop next node. */
95 ip6_ioam_cache_ts_node_fn (vlib_main_t * vm,
96 vlib_node_runtime_t * node, vlib_frame_t * frame)
98 ioam_cache_main_t *cm = &ioam_cache_main;
99 u32 n_left_from, *from, *to_next;
100 cache_ts_next_t next_index;
103 from = vlib_frame_vector_args (frame);
104 n_left_from = frame->n_vectors;
105 next_index = node->cached_next_index;
107 while (n_left_from > 0)
111 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
113 while (n_left_from > 0 && n_left_to_next > 0)
/* Default disposition: forward after popping the hop-by-hop header. */
117 u32 next0 = IOAM_CACHE_TS_NEXT_POP_HBYH;
119 ip6_hop_by_hop_header_t *hbh0, *hbh_cmp;
122 u32 cache_ts_index = 0;
123 u8 cache_thread_id = 0;
131 p0 = vlib_get_buffer (vm, bi0);
132 ip0 = vlib_buffer_get_current (p0);
/* Only TCP packets participate in tunnel selection. */
133 if (IP_PROTOCOL_TCP ==
134 ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
136 tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
/* SYN-ACK from one of the anycast servers: compare its iOAM
 * path data against the cached entry for this flow. */
137 if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
138 (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK)
140 /* Look up and compare */
141 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
143 if (0 == ioam_cache_ts_lookup (ip0,
150 (tcp0->ack_number), &hbh_cmp,
152 &cache_thread_id, 1))
/* Compare one-way path delay of this response vs the cached one. */
158 ip6_ioam_analyse_compare_path_delay (hbh0, hbh_cmp,
159 cm->criteria_oneway);
162 /* current syn/ack is worse than the earlier: Drop */
163 next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
164 /* Check if all responses are received or time has exceeded
165 send cached response if yes */
166 ioam_cache_ts_check_and_send (cache_thread_id,
171 /* Update cache with this buffer */
172 /* If successfully updated then skip sending it */
175 ioam_cache_ts_update (cache_thread_id,
182 next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
187 next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
/* RST response: drop it, and flush the cached best response
 * for the flow if the cache entry is complete/expired. */
190 else if ((tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
192 /* Look up and compare */
193 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
194 if (0 == ioam_cache_ts_lookup (ip0, hbh0->protocol, clib_net_to_host_u16 (tcp0->src_port), clib_net_to_host_u16 (tcp0->dst_port), clib_net_to_host_u32 (tcp0->ack_number), &hbh_cmp, &cache_ts_index, &cache_thread_id, 1)) //response seen
196 next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
198 ioam_cache_ts_check_and_send (cache_thread_id,
/* Record a trace entry if tracing is enabled for this buffer. */
204 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
206 if (p0->flags & VLIB_BUFFER_IS_TRACED)
208 cache_ts_trace_t *t =
209 vlib_add_trace (vm, node, p0, sizeof (*t));
212 (ip0->ip_version_traffic_class_and_flow_label);
213 t->next_index = next0;
216 /* verify speculative enqueue, maybe switch current next frame */
222 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
223 to_next, n_left_to_next,
228 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
230 vlib_node_increment_counter (vm, ioam_cache_ts_node.index,
231 CACHE_TS_ERROR_RECORDED, recorded);
232 return frame->n_vectors;
236 * Node for IP6 iOAM header cache
/* Graph-node registration: hooked into the ip6 path (via classifier
 * config, see file header) to select among anycast SYN-ACK responses. */
239 VLIB_REGISTER_NODE (ioam_cache_ts_node) =
241 .function = ip6_ioam_cache_ts_node_fn,
242 .name = "ip6-ioam-tunnel-select",
243 .vector_size = sizeof (u32),
244 .format_trace = format_cache_ts_trace,
245 .type = VLIB_NODE_TYPE_INTERNAL,
246 .n_errors = ARRAY_LEN (cache_ts_error_strings),
247 .error_strings = cache_ts_error_strings,
248 .n_next_nodes = IOAM_CACHE_TS_N_NEXT,
249 /* edit / add dispositions here */
252 [IOAM_CACHE_TS_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop",
253 [IOAM_CACHE_TS_ERROR_NEXT_DROP] = "error-drop",
/* Trace record type for the ip6-add-syn-hop-by-hop node. */
261 } ip6_reset_ts_hbh_trace_t;
263 /* packet trace format function */
265 format_ip6_reset_ts_hbh_trace (u8 * s, va_list * args)
267 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
268 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
269 ip6_reset_ts_hbh_trace_t *t = va_arg (*args,
270 ip6_reset_ts_hbh_trace_t *);
/* Prints the next-node index chosen for the traced SYN packet. */
273 format (s, "IP6_IOAM_RESET_TUNNEL_SELECT_HBH: next index %d",
278 vlib_node_registration_t ip6_reset_ts_hbh_node;
/* Error counters for the SYN hop-by-hop insertion node. */
280 #define foreach_ip6_reset_ts_hbh_error \
281 _(PROCESSED, "iOAM Syn/Ack Pkts processed") \
282 _(SAVED, "iOAM Syn Pkts state saved") \
283 _(REMOVED, "iOAM Syn/Ack Pkts state removed")
287 #define _(sym,str) IP6_RESET_TS_HBH_ERROR_##sym,
288 foreach_ip6_reset_ts_hbh_error
290 IP6_RESET_TS_HBH_N_ERROR,
291 } ip6_reset_ts_hbh_error_t;
293 static char *ip6_reset_ts_hbh_error_strings[] = {
294 #define _(sym,string) string,
295 foreach_ip6_reset_ts_hbh_error
/* Next nodes for the insertion node: continue ip6 lookup or drop. */
299 #define foreach_ip6_ioam_cache_ts_input_next \
300 _(IP6_LOOKUP, "ip6-lookup") \
301 _(DROP, "error-drop")
305 #define _(s,n) IP6_IOAM_CACHE_TS_INPUT_NEXT_##s,
306 foreach_ip6_ioam_cache_ts_input_next
308 IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
309 } ip6_ioam_cache_ts_input_next_t;
/* Node function for "ip6-add-syn-hop-by-hop".
 * For each TCP SYN (no ACK) toward the anycast service it:
 *   1. creates a tunnel-select cache entry keyed on the flow
 *      (ioam_cache_ts_add, expecting seq_number+1 in the SYN-ACK),
 *   2. makes room in front of the IPv6 header and inserts the
 *      pre-built hop-by-hop rewrite (cm->rewrite) carrying the iOAM
 *      e2e cache option with the pool id/index of the new entry,
 *   3. patches the protocol chain and payload length accordingly.
 * Standard VPP dual-loop (x2) plus single-loop (x1) structure; both
 * paths perform the identical per-packet transformation. */
313 ip6_reset_ts_hbh_node_fn (vlib_main_t * vm,
314 vlib_node_runtime_t * node, vlib_frame_t * frame)
316 ioam_cache_main_t *cm = &ioam_cache_main;
317 u32 n_left_from, *from, *to_next;
318 ip_lookup_next_t next_index;
319 u32 processed = 0, cache_ts_added = 0;
/* Pre-built hop-by-hop header bytes to prepend to each SYN. */
321 u8 *rewrite = cm->rewrite;
322 u32 rewrite_length = vec_len (rewrite);
323 ioam_e2e_cache_option_t *e2e = 0;
/* Number of SYN-ACK responses to wait for before sending the best one. */
324 u8 no_of_responses = cm->wait_for_responses;
326 from = vlib_frame_vector_args (frame);
327 n_left_from = frame->n_vectors;
328 next_index = node->cached_next_index;
330 while (n_left_from > 0)
334 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
336 now = vlib_time_now (vm);
/* Dual loop: process two packets per iteration. */
337 while (n_left_from >= 4 && n_left_to_next >= 2)
340 vlib_buffer_t *b0, *b1;
342 ip6_header_t *ip0, *ip1;
343 tcp_header_t *tcp0, *tcp1;
344 u32 tcp_offset0, tcp_offset1;
345 ip6_hop_by_hop_header_t *hbh0, *hbh1;
346 u64 *copy_src0, *copy_dst0, *copy_src1, *copy_dst1;
348 u32 pool_index0 = 0, pool_index1 = 0;
350 next0 = next1 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
351 /* Prefetch next iteration. */
353 vlib_buffer_t *p2, *p3;
355 p2 = vlib_get_buffer (vm, from[2]);
356 p3 = vlib_get_buffer (vm, from[3]);
358 vlib_prefetch_buffer_header (p2, LOAD);
359 vlib_prefetch_buffer_header (p3, LOAD);
360 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
361 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
365 /* speculatively enqueue b0 to the current next frame */
366 to_next[0] = bi0 = from[0];
367 to_next[1] = bi1 = from[1];
373 b0 = vlib_get_buffer (vm, bi0);
374 b1 = vlib_get_buffer (vm, bi1);
376 ip0 = vlib_buffer_get_current (b0);
377 ip1 = vlib_buffer_get_current (b1);
/* Packet 0: only pure SYNs (SYN set, ACK clear) get the h-b-h header. */
379 if (IP_PROTOCOL_TCP !=
380 ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
384 tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
385 if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
386 (tcp0->flags & TCP_FLAG_ACK) == 0)
388 if (no_of_responses > 0)
390 /* Create TS select entry */
391 if (0 == ioam_cache_ts_add (ip0,
397 (tcp0->seq_number) + 1,
398 no_of_responses, now,
399 vm->thread_index, &pool_index0))
/* Shift the IPv6 header back by rewrite_length to open a gap
 * for the hop-by-hop header (40-byte ip6 header = 5 u64s). */
404 copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
405 copy_src0 = (u64 *) ip0;
407 copy_dst0[0] = copy_src0[0];
408 copy_dst0[1] = copy_src0[1];
409 copy_dst0[2] = copy_src0[2];
410 copy_dst0[3] = copy_src0[3];
411 copy_dst0[4] = copy_src0[4];
413 vlib_buffer_advance (b0, -(word) rewrite_length);
414 ip0 = vlib_buffer_get_current (b0);
416 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
417 /* $$$ tune, rewrite_length is a multiple of 8 */
418 clib_memcpy_fast (hbh0, rewrite, rewrite_length);
/* Stamp the e2e cache option with the pool id/index so the
 * response path can find this flow's cache entry. */
420 (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
421 cm->rewrite_pool_index_offset);
422 e2e->pool_id = (u8) vm->thread_index;
423 e2e->pool_index = pool_index0;
424 ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
426 sizeof (ioam_e2e_cache_option_t)),
427 &cm->sr_localsid_ts);
428 /* Patch the protocol chain, insert the h-b-h (type 0) header */
429 hbh0->protocol = ip0->protocol;
432 clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
433 ip0->payload_length = clib_host_to_net_u16 (new_l0);
/* Packet 1: identical handling. */
438 if (IP_PROTOCOL_TCP !=
439 ip6_locate_header (b1, ip1, IP_PROTOCOL_TCP, &tcp_offset1))
443 tcp1 = (tcp_header_t *) ((u8 *) ip1 + tcp_offset1);
444 if ((tcp1->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
445 (tcp1->flags & TCP_FLAG_ACK) == 0)
447 if (no_of_responses > 0)
449 /* Create TS select entry */
450 if (0 == ioam_cache_ts_add (ip1,
456 (tcp1->seq_number) + 1,
457 no_of_responses, now,
458 vm->thread_index, &pool_index1))
464 copy_dst1 = (u64 *) (((u8 *) ip1) - rewrite_length);
465 copy_src1 = (u64 *) ip1;
467 copy_dst1[0] = copy_src1[0];
468 copy_dst1[1] = copy_src1[1];
469 copy_dst1[2] = copy_src1[2];
470 copy_dst1[3] = copy_src1[3];
471 copy_dst1[4] = copy_src1[4];
473 vlib_buffer_advance (b1, -(word) rewrite_length);
474 ip1 = vlib_buffer_get_current (b1);
476 hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
477 /* $$$ tune, rewrite_length is a multiple of 8 */
478 clib_memcpy_fast (hbh1, rewrite, rewrite_length);
480 (ioam_e2e_cache_option_t *) ((u8 *) hbh1 +
481 cm->rewrite_pool_index_offset);
482 e2e->pool_id = (u8) vm->thread_index;
483 e2e->pool_index = pool_index1;
484 ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
486 sizeof (ioam_e2e_cache_option_t)),
487 &cm->sr_localsid_ts);
488 /* Patch the protocol chain, insert the h-b-h (type 0) header */
489 hbh1->protocol = ip1->protocol;
492 clib_net_to_host_u16 (ip1->payload_length) + rewrite_length;
493 ip1->payload_length = clib_host_to_net_u16 (new_l1);
498 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
500 if (b0->flags & VLIB_BUFFER_IS_TRACED)
502 ip6_reset_ts_hbh_trace_t *t =
503 vlib_add_trace (vm, node, b0, sizeof (*t));
504 t->next_index = next0;
506 if (b1->flags & VLIB_BUFFER_IS_TRACED)
508 ip6_reset_ts_hbh_trace_t *t =
509 vlib_add_trace (vm, node, b1, sizeof (*t));
510 t->next_index = next1;
515 /* verify speculative enqueue, maybe switch current next frame */
516 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
517 to_next, n_left_to_next,
518 bi0, bi1, next0, next1);
/* Single loop: remaining packets, same transformation as above. */
520 while (n_left_from > 0 && n_left_to_next > 0)
528 ip6_hop_by_hop_header_t *hbh0;
529 u64 *copy_src0, *copy_dst0;
533 next0 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
534 /* speculatively enqueue b0 to the current next frame */
542 b0 = vlib_get_buffer (vm, bi0);
544 ip0 = vlib_buffer_get_current (b0);
545 if (IP_PROTOCOL_TCP !=
546 ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
550 tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
551 if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
552 (tcp0->flags & TCP_FLAG_ACK) == 0)
554 if (no_of_responses > 0)
556 /* Create TS select entry */
557 if (0 == ioam_cache_ts_add (ip0,
563 (tcp0->seq_number) + 1,
564 no_of_responses, now,
565 vm->thread_index, &pool_index0))
570 copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
571 copy_src0 = (u64 *) ip0;
573 copy_dst0[0] = copy_src0[0];
574 copy_dst0[1] = copy_src0[1];
575 copy_dst0[2] = copy_src0[2];
576 copy_dst0[3] = copy_src0[3];
577 copy_dst0[4] = copy_src0[4];
579 vlib_buffer_advance (b0, -(word) rewrite_length);
580 ip0 = vlib_buffer_get_current (b0);
582 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
583 /* $$$ tune, rewrite_length is a multiple of 8 */
584 clib_memcpy_fast (hbh0, rewrite, rewrite_length);
586 (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
587 cm->rewrite_pool_index_offset);
588 e2e->pool_id = (u8) vm->thread_index;
589 e2e->pool_index = pool_index0;
590 ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
592 sizeof (ioam_e2e_cache_option_t)),
593 &cm->sr_localsid_ts);
594 /* Patch the protocol chain, insert the h-b-h (type 0) header */
595 hbh0->protocol = ip0->protocol;
598 clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
599 ip0->payload_length = clib_host_to_net_u16 (new_l0);
603 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
604 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
606 ip6_reset_ts_hbh_trace_t *t =
607 vlib_add_trace (vm, node, b0, sizeof (*t));
608 t->next_index = next0;
611 /* verify speculative enqueue, maybe switch current next frame */
612 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
613 to_next, n_left_to_next,
617 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Export per-frame counters: packets processed and cache entries saved. */
620 vlib_node_increment_counter (vm, ip6_reset_ts_hbh_node.index,
621 IP6_RESET_TS_HBH_ERROR_PROCESSED, processed);
622 vlib_node_increment_counter (vm, ip6_reset_ts_hbh_node.index,
623 IP6_RESET_TS_HBH_ERROR_SAVED, cache_ts_added);
625 return frame->n_vectors;
/* Registration for the SYN hop-by-hop insertion node; reached from the
 * classifier via "acl-hit-next ip6-node ip6-add-syn-hop-by-hop". */
629 VLIB_REGISTER_NODE (ip6_reset_ts_hbh_node) =
631 .function = ip6_reset_ts_hbh_node_fn,
632 .name = "ip6-add-syn-hop-by-hop",
633 .vector_size = sizeof (u32),
634 .format_trace = format_ip6_reset_ts_hbh_trace,
635 .type = VLIB_NODE_TYPE_INTERNAL,
636 .n_errors = ARRAY_LEN (ip6_reset_ts_hbh_error_strings),
637 .error_strings = ip6_reset_ts_hbh_error_strings,
638 /* See ip/lookup.h */
639 .n_next_nodes = IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
642 #define _(s,n) [IP6_IOAM_CACHE_TS_INPUT_NEXT_##s] = n,
643 foreach_ip6_ioam_cache_ts_input_next
648 VLIB_NODE_FUNCTION_MULTIARCH (ip6_reset_ts_hbh_node, ip6_reset_ts_hbh_node_fn)
651 vlib_node_registration_t ioam_cache_ts_timer_tick_node;
/* Trace record type for the timer-tick input node. */
656 } ioam_cache_ts_timer_tick_trace_t;
658 /* packet trace format function */
660 format_ioam_cache_ts_timer_tick_trace (u8 * s, va_list * args)
662 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
663 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
664 ioam_cache_ts_timer_tick_trace_t *t =
665 va_arg (*args, ioam_cache_ts_timer_tick_trace_t *);
667 s = format (s, "IOAM_CACHE_TS_TIMER_TICK: thread index %d",
/* Error counters for the timer-tick node (counts expirations handled). */
672 #define foreach_ioam_cache_ts_timer_tick_error \
673 _(TIMER, "Timer events")
677 #define _(sym,str) IOAM_CACHE_TS_TIMER_TICK_ERROR_##sym,
678 foreach_ioam_cache_ts_timer_tick_error
680 IOAM_CACHE_TS_TIMER_TICK_N_ERROR,
681 } ioam_cache_ts_timer_tick_error_t;
683 static char *ioam_cache_ts_timer_tick_error_strings[] = {
684 #define _(sym,string) string,
685 foreach_ioam_cache_ts_timer_tick_error
/* Enable/disable the timer-tick input node: polling when enabled,
 * disabled otherwise (the left side of the ternary condition tests
 * the `enable` argument). */
690 ioam_cache_ts_timer_node_enable (vlib_main_t * vm, u8 enable)
692 vlib_node_set_state (vm, ioam_cache_ts_timer_tick_node.index,
694 0 ? VLIB_NODE_STATE_DISABLED :
695 VLIB_NODE_STATE_POLLING)
/* Timer-wheel expiry callback: for every expired timer, extract the
 * cache pool index from the timer handle (low 28 bits) and send the
 * best cached response for that flow, then bump the TIMER counter. */
699 expired_cache_ts_timer_callback (u32 * expired_timers)
701 ioam_cache_main_t *cm = &ioam_cache_main;
704 u32 thread_index = vlib_get_thread_index ();
707 for (i = 0; i < vec_len (expired_timers); i++)
709 /* Get pool index and pool id */
710 pool_index = expired_timers[i] & 0x0FFFFFFF;
712 /* Handle expiration */
713 ioam_cache_ts_send (thread_index, pool_index);
716 vlib_node_increment_counter (cm->vlib_main,
717 ioam_cache_ts_timer_tick_node.index,
718 IOAM_CACHE_TS_TIMER_TICK_ERROR_TIMER, count);
/* Input-node function: advance this thread's timer wheel (which fires
 * expired_cache_ts_timer_callback for due entries), then sleep one
 * IOAM_CACHE_TS_TICK (ms) between polls, retrying nanosleep on EINTR. */
722 ioam_cache_ts_timer_tick_node_fn (vlib_main_t * vm,
723 vlib_node_runtime_t * node,
726 ioam_cache_main_t *cm = &ioam_cache_main;
727 u32 my_thread_index = vlib_get_thread_index ();
728 struct timespec ts, tsrem;
730 tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index],
733 ts.tv_nsec = 1000 * 1000 * IOAM_CACHE_TS_TICK;
734 while (nanosleep (&ts, &tsrem) < 0)
/* Registration for the timer-tick input node; starts DISABLED and is
 * switched to POLLING by ioam_cache_ts_timer_node_enable(). */
742 VLIB_REGISTER_NODE (ioam_cache_ts_timer_tick_node) = {
743 .function = ioam_cache_ts_timer_tick_node_fn,
744 .name = "ioam-cache-ts-timer-tick",
745 .format_trace = format_ioam_cache_ts_timer_tick_trace,
746 .type = VLIB_NODE_TYPE_INPUT,
748 .n_errors = ARRAY_LEN(ioam_cache_ts_timer_tick_error_strings),
749 .error_strings = ioam_cache_ts_timer_tick_error_strings,
753 .state = VLIB_NODE_STATE_DISABLED,
755 /* edit / add dispositions here */
763 * fd.io coding-style-patch-verification: ON
766 * eval: (c-set-style "gnu")