/*
 * esp_encrypt.c : ipsecmb ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>

#include <ipsecmb/ipsecmb.h>
#define foreach_esp_encrypt_next   \
  _ (DROP, "error-drop")           \
  _ (IP4_LOOKUP, "ip4-lookup")     \
  _ (IP6_LOOKUP, "ip6-lookup")     \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                 \
  _ (RX_PKTS, "ESP pkts received")                \
  _ (NO_BUFFER, "No buffer (packet dropped)")     \
  _ (ENCRYPTION_FAILED, "ESP encryption failed")  \
  _ (SEQ_CYCLED, "sequence number cycled")

typedef enum
{
#define _(sym, str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
typedef struct
{
  u32 spi;
  u32 seq;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

#ifdef CLIB_MARCH_VARIANT
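/*
 * Stash @size bytes of freshly encrypted traffic (at @from) into the
 * per-thread pool of random-byte blocks; random_bytes() hands these out
 * as IV material, saving a /dev/urandom read per packet.
 */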
static_always_inline void
add_random_bytes_from_traffic (ipsecmb_main_t * imbm,
                               u32 thread_index, void *from, u8 size)
{
  ASSERT (STRUCT_SIZE_OF (random_bytes_t, data) == size);
  random_bytes_t *rb;
  u32 idx;
  ipsecmb_per_thread_data_t *t = &imbm->per_thread_data[thread_index];
  if (PREDICT_TRUE (vec_len (t->rb_recycle_list)))
    {
      idx = vec_pop (t->rb_recycle_list);
      rb = pool_elt_at_index (t->rb_pool, idx);
    }
  else
    {
      pool_get (t->rb_pool, rb);
      idx = rb - t->rb_pool;
    }
  clib_memcpy (rb->data, from, STRUCT_SIZE_OF (random_bytes_t, data));
  vec_add1 (t->rb_from_traffic, idx);
}
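/*
 * Copy @size bytes of IV material to @where: prefer a block harvested
 * from encrypted traffic, otherwise refill the pool from /dev/urandom
 * and consume a block from there.
 */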
static_always_inline void
random_bytes (ipsecmb_main_t * imbm, u32 thread_index, u8 * where, u8 size)
{
  ASSERT (STRUCT_SIZE_OF (random_bytes_t, data) == size);
  const u8 block_size = STRUCT_SIZE_OF (random_bytes_t, data);
  ipsecmb_per_thread_data_t *t = &imbm->per_thread_data[thread_index];
  if (PREDICT_TRUE (vec_len (t->rb_from_traffic)))
    {
      u32 idx = vec_pop (t->rb_from_traffic);
      random_bytes_t *rb = pool_elt_at_index (t->rb_pool, idx);
      clib_memcpy (where, rb->data, block_size);
      vec_add1 (t->rb_recycle_list, idx);
      return;
    }
  if (PREDICT_FALSE (0 == vec_len (t->rb_from_dev_urandom)))
    {
      ssize_t bytes_read = read (imbm->dev_urandom_fd, t->urandom_buffer,
                                 sizeof (t->urandom_buffer));
      ssize_t i;
      if (bytes_read < 0)
        {
          clib_unix_warning ("read() from /dev/urandom failed");
          bytes_read = 0;	/* treat a failed read as an empty one */
        }
      if (bytes_read < block_size)
        {
          clib_warning
            ("read() from /dev/urandom produced only %zd bytes", bytes_read);
        }
      const ssize_t limit = clib_min (bytes_read, sizeof (t->urandom_buffer));
      /* refill recycled pool elements first ... */
      for (i = 0; limit - i >= block_size && vec_len (t->rb_recycle_list) > 0;
           i += block_size)
        {
          u32 idx = vec_pop (t->rb_recycle_list);
          random_bytes_t *rb = pool_elt_at_index (t->rb_pool, idx);
          clib_memcpy (rb->data, t->urandom_buffer + i, block_size);
          vec_add1 (t->rb_from_dev_urandom, idx);
        }
      /* ... then allocate new pool elements for the remainder */
      for (; limit - i >= block_size; i += block_size)
        {
          random_bytes_t *rb;
          pool_get (t->rb_pool, rb);
          clib_memcpy (rb->data, t->urandom_buffer + i, block_size);
          vec_add1 (t->rb_from_dev_urandom, rb - t->rb_pool);
        }
    }
  u32 idx = vec_pop (t->rb_from_dev_urandom);
  random_bytes_t *rb = pool_elt_at_index (t->rb_pool, idx);
  clib_memcpy (where, rb->data, block_size);
  vec_add1 (t->rb_recycle_list, idx);
}
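/*
 * Finalize a completed encrypt job: recover the buffer and next node from
 * the job's user data, fix up the outer IP (and optional UDP) length
 * fields and checksum, and harvest the packet's trailing bytes as future
 * IV material; transport-mode packets go straight to interface-output.
 */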
static_always_inline void
esp_finish_encrypt (vlib_main_t * vm, JOB_AES_HMAC * job,
                    ipsecmb_main_t * imbm, int thread_index,
                    u32 * bi0, u32 * next0, ipsec_sa_t ** sa0, int is_ip6)
{
  ip4_header_t *oh4 = 0;
  udp_header_t *udp = 0;
  ip6_header_t *oh6 = 0;
  ipsec_main_t *im = &ipsec_main;
  *bi0 = (uintptr_t) job->user_data;
  vlib_buffer_t *b0 = vlib_get_buffer (vm, *bi0);
  u32 sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
  *sa0 = pool_elt_at_index (im->sad, sa_index0);
  oh4 = vlib_buffer_get_current (b0);
  oh6 = vlib_buffer_get_current (b0);
  if (is_ip6)
    {
      oh6->payload_length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                              sizeof (ip6_header_t));
    }
  else
    {
      oh4->length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
      oh4->checksum = ip4_header_checksum (oh4);
      if ((*sa0)->udp_encap)
        {
          udp = (udp_header_t *) (oh4 + 1);
          udp->length =
            clib_host_to_net_u16 (clib_net_to_host_u16 (oh4->length) -
                                  ip4_header_bytes (oh4));
        }
    }
  *next0 = (uintptr_t) job->user_data2;
  const int iv_size = imbm->crypto_algs[(*sa0)->crypto_alg].iv_size;
  add_random_bytes_from_traffic (imbm, thread_index,
                                 vlib_buffer_get_current (b0) +
                                 b0->current_length - iv_size, iv_size);
  if (!(*sa0)->is_tunnel)
    {
      *next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
      vlib_buffer_advance (b0, -sizeof (ethernet_header_t));
    }
}
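/* Fill the outer IPv4 header fields that do not depend on the SA. */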
static_always_inline void
ipsecmb_ip4_fill_common_values (ip4_header_t * oh4, u8 tos)
{
  oh4->ip_version_and_header_length = 0x45;
  oh4->tos = tos;
  oh4->fragment_id = 0;
  oh4->flags_and_fragment_offset = 0;
  oh4->ttl = 254;		/* same value the ip6 paths use for hop_limit */
}
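/*
 * With UDP encapsulation enabled on the SA, advance *esp past the NAT-T
 * UDP header and fill that header in; otherwise just set the outer
 * protocol to ESP.
 */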
static_always_inline void
ipsecmb_handle_udp_encap (ipsec_sa_t * sa0, esp_header_t ** esp,
                          ip4_header_t ** oh4)
{
  if (sa0->udp_encap)
    {
      *esp = (esp_header_t *) ((u8 *) *esp + sizeof (udp_header_t));
      udp_header_t *udp = (udp_header_t *) ((*oh4) + 1);
      udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      udp->checksum = 0;
      (*oh4)->protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      (*oh4)->protocol = IP_PROTOCOL_IPSEC_ESP;
    }
}
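/*
 * Tunnel mode: prepend a fresh outer IP4/IP6 header (plus optional UDP
 * encap header), the ESP header and the IV in front of the packet and
 * steer it to the matching ip lookup node.
 */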
static_always_inline void
esp_prepare_tunnel_headers (vlib_buffer_t * b0, ipsec_sa_t * sa0, u32 * next0,
                            u8 * next_hdr_type, ip4_header_t * ih4,
                            ip4_header_t ** oh4, ip6_header_t * ih6,
                            ip6_header_t ** oh6, esp_header_t ** esp,
                            u32 iv_size, int is_ip6)
{
  if (is_ip6)
    {
      *next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
      *next_hdr_type = IP_PROTOCOL_IPV6;
      *oh6 = (ip6_header_t *) ((u8 *) ih6 - sizeof (esp_header_t) -
                               sizeof (ip6_header_t) - iv_size);
      (*oh6)->src_address.as_u64[0] = sa0->tunnel_src_addr.ip6.as_u64[0];
      (*oh6)->src_address.as_u64[1] = sa0->tunnel_src_addr.ip6.as_u64[1];
      (*oh6)->dst_address.as_u64[0] = sa0->tunnel_dst_addr.ip6.as_u64[0];
      (*oh6)->dst_address.as_u64[1] = sa0->tunnel_dst_addr.ip6.as_u64[1];

      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vlib_buffer_advance (b0, -(sizeof (esp_header_t) +
                                 sizeof (ip6_header_t) + iv_size));
      (*oh6)->ip_version_traffic_class_and_flow_label =
        ih6->ip_version_traffic_class_and_flow_label;
      (*oh6)->protocol = IP_PROTOCOL_IPSEC_ESP;
      (*oh6)->hop_limit = 254;
      *esp = (esp_header_t *) ((*oh6) + 1);
    }
  else
    {
      *next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
      u32 udp_hdr_size = 0;
      if (sa0->udp_encap)
        udp_hdr_size = sizeof (udp_header_t);
      *next_hdr_type = IP_PROTOCOL_IP_IN_IP;
      *oh4 =
        (ip4_header_t *) (((u8 *) ih4) - sizeof (ip4_header_t) -
                          sizeof (esp_header_t) - udp_hdr_size - iv_size);
      (*oh4)->src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
      (*oh4)->dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
      vlib_buffer_advance (b0, -(sizeof (ip4_header_t) +
                                 sizeof (esp_header_t) +
                                 udp_hdr_size + iv_size));
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      *esp = (esp_header_t *) ((*oh4) + 1);

      ipsecmb_ip4_fill_common_values (*oh4, ih4->tos);
      ipsecmb_handle_udp_encap (sa0, esp, oh4);
    }
}
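/*
 * Transport mode: open a gap for the ESP header and IV between the IP
 * header and its payload, rebuilding the IP header (and any preceding
 * ethernet header) at its new location.
 */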
static_always_inline void
esp_prepare_transport_headers (vlib_buffer_t * b0, ipsec_sa_t * sa0,
                               u32 * next0, u8 * next_hdr_type,
                               ip4_header_t * ih4, ip4_header_t ** oh4,
                               ip6_header_t * ih6, ip6_header_t ** oh6,
                               esp_header_t ** esp, u32 iv_size, int is_ip6)
{
  if (is_ip6)
    {
      *next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
      *next_hdr_type = ih6->protocol;
      (*oh6) = (ip6_header_t *) ((u8 *) ih6 - sizeof (esp_header_t) -
                                 iv_size);
      if (vnet_buffer (b0)->sw_if_index[VLIB_TX] != ~0)
        {
          ethernet_header_t *ieh0, *oeh0;
          ieh0 = (ethernet_header_t *) vlib_buffer_get_current (b0) - 1;
          oeh0 = (ethernet_header_t *) (*oh6) - 1;
          clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
        }
      (*oh6)->src_address.as_u64[0] = ih6->src_address.as_u64[0];
      (*oh6)->src_address.as_u64[1] = ih6->src_address.as_u64[1];
      (*oh6)->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
      (*oh6)->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
      vlib_buffer_advance (b0, -(sizeof (esp_header_t) + iv_size));
      (*oh6)->ip_version_traffic_class_and_flow_label =
        ih6->ip_version_traffic_class_and_flow_label;
      (*oh6)->protocol = IP_PROTOCOL_IPSEC_ESP;
      (*oh6)->hop_limit = 254;
      *esp = (esp_header_t *) ((*oh6) + 1);
    }
  else
    {
      *next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
      u32 udp_hdr_size = 0;
      if (sa0->udp_encap)
        udp_hdr_size = sizeof (udp_header_t);
      *next_hdr_type = ih4->protocol;
      (*oh4) = (ip4_header_t *) (((u8 *) ih4) - sizeof (esp_header_t) -
                                 udp_hdr_size - iv_size);
      if (vnet_buffer (b0)->sw_if_index[VLIB_TX] != ~0)
        {
          ethernet_header_t *ieh0, *oeh0;
          ieh0 = (ethernet_header_t *) vlib_buffer_get_current (b0) - 1;
          oeh0 = (ethernet_header_t *) (*oh4) - 1;
          clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
        }
      (*oh4)->src_address.as_u32 = ih4->src_address.as_u32;
      (*oh4)->dst_address.as_u32 = ih4->dst_address.as_u32;
      vlib_buffer_advance (b0,
                           -(sizeof (esp_header_t) + udp_hdr_size + iv_size));
      *esp = (esp_header_t *) ((*oh4) + 1);

      ipsecmb_ip4_fill_common_values (*oh4, ih4->tos);
      ipsecmb_handle_udp_encap (sa0, esp, oh4);
    }
}
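/*
 * One pass of the encrypt node: submit one job per packet to the
 * multi-buffer manager and enqueue packets as their jobs complete; once
 * the frame is drained, flush_job() forces out the jobs still in flight,
 * which is why the outer loop also runs while packets_in_flight > 0.
 */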
static uword
esp_encrypt_ipsecmb_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * from_frame, int is_ip6)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsec_main_t *im = &ipsec_main;
  u32 packets_in_flight = 0;
  next_index = node->cached_next_index;
  u32 thread_index = vlib_get_thread_index ();
  ipsec_alloc_empty_buffers (vm, im);
  u32 *to_be_freed = NULL;
  ipsecmb_per_thread_data_t *t = &imbm->per_thread_data[thread_index];

  MB_MGR *mgr = imbm->mb_mgr[thread_index];

  while (n_left_from > 0 || packets_in_flight > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sa_index0, next0;
          vlib_buffer_t *b0 = 0;
          ipsec_sa_t *sa0;
          ipsecmb_sa_t *samb0;
          ip4_header_t *ih4, *oh4 = 0;
          ip6_header_t *ih6, *oh6 = 0;
          esp_header_t *esp = 0;
          u8 next_hdr_type;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          next0 = ESP_ENCRYPT_NEXT_DROP;

          b0 = vlib_get_buffer (vm, bi0);
          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);
          samb0 = pool_elt_at_index (imbm->sad, sa_index0);

          if (esp_seq_advance (sa0))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, node->node_index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa0->total_data_size += b0->current_length;

          /* clone shared buffers so we can encrypt in place */
          if (PREDICT_FALSE (b0->n_add_refs > 0))
            {
              vec_add1 (to_be_freed, bi0);
              b0 = vlib_buffer_copy (vm, b0);
              bi0 = vlib_get_buffer_index (vm, b0);
            }

          ih4 = vlib_buffer_get_current (b0);
          ih6 = vlib_buffer_get_current (b0);

          const int iv_size = imbm->crypto_algs[sa0->crypto_alg].iv_size;
          if (sa0->is_tunnel)
            esp_prepare_tunnel_headers (b0, sa0, &next0, &next_hdr_type, ih4,
                                        &oh4, ih6, &oh6, &esp, iv_size,
                                        is_ip6);
          else
            esp_prepare_transport_headers (b0, sa0, &next0, &next_hdr_type,
                                           ih4, &oh4, ih6, &oh6, &esp,
                                           iv_size, is_ip6);

          esp->spi = clib_net_to_host_u32 (sa0->spi);
          esp->seq = clib_net_to_host_u32 (sa0->seq);
          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);

          const u32 payload_offset =
            (u8 *) (esp + 1) + iv_size - (u8 *) vlib_buffer_get_current (b0);
          JOB_AES_HMAC *job = IPSECMB_FUNC (get_next_job) (mgr);
          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {
              const int block_size =
                imbm->crypto_algs[sa0->crypto_alg].block_size;
              u32 payload_length = b0->current_length - payload_offset;
              int blocks = 1 + (payload_length + 1) / block_size;

              /* pad packet in input buffer */
              u8 pad_bytes =
                block_size * blocks - sizeof (esp_footer_t) - payload_length;
              u8 i;
              u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;
              b0->current_length = payload_offset + block_size * blocks;
              for (i = 0; i < pad_bytes; ++i)
                padding[i] = i + 1;
              esp_footer_t *f0;
              f0 = vlib_buffer_get_current (b0) + b0->current_length -
                sizeof (esp_footer_t);
              f0->pad_length = pad_bytes;
              f0->next_header = next_hdr_type;
            }
          random_bytes (imbm, thread_index, (u8 *) (esp + 1), iv_size);
          job->iv = (u8 *) (esp + 1);
          job->iv_len_in_bytes = iv_size;

          job->chain_order = CIPHER_HASH;
          job->cipher_direction = ENCRYPT;
          job->src = (u8 *) esp;
          job->dst = (u8 *) ((u8 *) (esp + 1) + iv_size);
          job->cipher_mode = imbm->crypto_algs[sa0->crypto_alg].cipher_mode;
          job->aes_enc_key_expanded = samb0->aes_enc_key_expanded;
          job->aes_dec_key_expanded = samb0->aes_dec_key_expanded;
          job->aes_key_len_in_bytes = sa0->crypto_key_len;
          job->cipher_start_src_offset_in_bytes =
            sizeof (esp_header_t) + iv_size;
          job->hash_start_src_offset_in_bytes = 0;
          job->msg_len_to_cipher_in_bytes =
            b0->current_length - payload_offset;
          if (PREDICT_TRUE (IPSEC_INTEG_ALG_NONE != sa0->integ_alg))
            {
              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *(u32 *) (vlib_buffer_get_current (b0) +
                            b0->current_length) = sa0->seq_hi;
                  b0->current_length += sizeof (u32);
                }
              job->msg_len_to_hash_in_bytes = b0->current_length -
                payload_offset + sizeof (esp_header_t) + iv_size;
              job->u.HMAC._hashed_auth_key_xor_ipad = samb0->ipad_hash;
              job->u.HMAC._hashed_auth_key_xor_opad = samb0->opad_hash;
              job->auth_tag_output =
                vlib_buffer_get_current (b0) + b0->current_length;
              job->auth_tag_output_len_in_bytes =
                imbm->integ_algs[sa0->integ_alg].hash_output_length;
              b0->current_length +=
                imbm->integ_algs[sa0->integ_alg].hash_output_length;
            }
          job->hash_alg = imbm->integ_algs[sa0->integ_alg].hash_alg;
          job->user_data = (void *) (uintptr_t) bi0;
          job->user_data2 = (void *) (uintptr_t) next0;
          job = IPSECMB_FUNC (submit_job) (mgr);
          ++packets_in_flight;

          if (!job)
            continue;

          --packets_in_flight;
          ASSERT (STS_COMPLETED == job->status);
          esp_finish_encrypt (vm, job, imbm, thread_index, &bi0, &next0, &sa0,
                              is_ip6);
          b0 = vlib_get_buffer (vm, bi0);

          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next -= 1;

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->udp_encap = sa0->udp_encap;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      if (PREDICT_FALSE (n_left_from == 0))
        {
          JOB_AES_HMAC *job = NULL;
          while (n_left_to_next > 0 && (job = IPSECMB_FUNC (flush_job) (mgr)))
            {
              u32 bi0 = 0, next0 = 0;
              ipsec_sa_t *sa0 = 0;
              vlib_buffer_t *b0 = 0;
              --packets_in_flight;
              ASSERT (STS_COMPLETED == job->status);
              esp_finish_encrypt (vm, job, imbm, thread_index, &bi0, &next0,
                                  &sa0, is_ip6);
              b0 = vlib_get_buffer (vm, bi0);

              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  esp_encrypt_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->spi = sa0->spi;
                  tr->seq = sa0->seq - 1;
                  tr->udp_encap = sa0->udp_encap;
                  tr->crypto_alg = sa0->crypto_alg;
                  tr->integ_alg = sa0->integ_alg;
                }

              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                               n_left_to_next, bi0, next0);
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

  vlib_buffer_free (vm, to_be_freed, vec_len (to_be_freed));
  vec_free (to_be_freed);
  if (PREDICT_TRUE (vec_len (t->rb_from_traffic) > 0))
    {
      /* recycle the traffic-sourced IV blocks: once the packets are sent
       * out, bytes from these packets are no longer unpredictable */
      vec_add (t->rb_recycle_list, t->rb_from_traffic,
               vec_len (t->rb_from_traffic));
      _vec_len (t->rb_from_traffic) = 0;
    }
  return from_frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_ipsecmb_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * from_frame)
{
  return esp_encrypt_ipsecmb_inline (vm, node, from_frame, 0 /*is_ip6 */ );
}

VLIB_NODE_FN (esp6_encrypt_ipsecmb_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * from_frame)
{
  return esp_encrypt_ipsecmb_inline (vm, node, from_frame, 1 /*is_ip6 */ );
}
#endif /* CLIB_MARCH_VARIANT */
static char *esp_encrypt_error_strings[] = {
#define _(sym, string) string,
  foreach_esp_encrypt_error
#undef _
};
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s, "esp: spi %u seq %u crypto %U integrity %U%s", t->spi, t->seq,
            format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
            t->integ_alg, t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_ipsecmb_node) = {
  .name = "esp4-encrypt-ipsecmb",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s, n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_ipsecmb_node) = {
  .name = "esp6-encrypt-ipsecmb",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s, n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */