VPP-256 - Coding style cleanup vnet/vnet/ipsec
vpp.git: vnet/vnet/ipsec/esp_encrypt.c
/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>

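/* 2^32 - 1: the largest value of the 32-bit ESP sequence number
   (the low word of the extended sequence number, RFC 4303) */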
#define ESP_SEQ_MAX (4294967295UL)

#define foreach_esp_encrypt_next                   \
_(DROP, "error-drop")                              \
_(IP4_INPUT, "ip4-input")                          \
_(IP6_INPUT, "ip6-input")                          \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(NO_BUFFER, "No buffer (packet dropped)")         \
 _(ENCRYPTION_FAILED, "ESP encryption failed")      \
 _(SEQ_CYCLED, "sequence number cycled")


typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

vlib_node_registration_t esp_encrypt_node;

typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}

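/*
 * AES-CBC encrypt in_len bytes from 'in' into 'out' using a per-thread
 * OpenSSL EVP context, so no locking is needed across workers. Passing
 * cipher = NULL to EVP_EncryptInit_ex reuses the cipher already set in
 * the context; the cipher is only looked up again when the algorithm
 * differs from the one used for the previous packet on this thread.
 */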
always_inline void
esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
                     u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
  esp_main_t *em = &esp_main;
  u32 cpu_index = os_get_cpu_number ();
  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
  const EVP_CIPHER *cipher = NULL;
  int out_len;

  ASSERT (alg < IPSEC_CRYPTO_N_ALG);

  if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
    return;

  if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg))
    {
      cipher = em->esp_crypto_algs[alg].type;
      em->per_thread_data[cpu_index].last_encrypt_alg = alg;
    }

  EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);

  EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
  EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
}

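/*
 * Advance the SA sequence number before it is written into the packet.
 * With extended sequence numbers (ESN) a wrap of the low 32 bits
 * carries into seq_hi. Returns 1 when the counter would cycle and
 * anti-replay forbids it; the packet must then be dropped and the SA
 * re-keyed.
 */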
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
        {
          if (PREDICT_FALSE
              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
        return 1;
      sa->seq++;
    }

  return 0;
}

static uword
esp_encrypt_node_fn (vlib_main_t * vm,
                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  ipsec_main_t *im = &ipsec_main;
  u32 *recycle = 0;
  u32 cpu_index = os_get_cpu_number ();

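  /* every input packet is encrypted into a pre-allocated empty buffer
     and the input buffer is recycled once the frame is done; top up the
     per-thread free list before processing the frame */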
  ipsec_alloc_empty_buffers (vm, im);

  u32 *empty_buffers = im->empty_buffers[cpu_index];

  if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
    {
      vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
      clib_warning ("not enough empty buffers. discarding frame");
      goto free_buffers_and_exit;
    }

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 i_bi0, o_bi0, next0;
          vlib_buffer_t *i_b0, *o_b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          uword last_empty_buffer;
          esp_header_t *o_esp0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u32 ip_proto = 0;
          u8 transport_mode = 0;

          i_bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          next0 = ESP_ENCRYPT_NEXT_DROP;

          i_b0 = vlib_get_buffer (vm, i_bi0);
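          /* the ipsec output feature's SPD lookup stored the index of
             the matching SA in the buffer metadata */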
          sa_index0 = vnet_buffer (i_b0)->output_features.ipsec_sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              //TODO: rekey SA
              o_bi0 = i_bi0;
              goto trace;
            }

          /* grab free buffer */
          last_empty_buffer = vec_len (empty_buffers) - 1;
          o_bi0 = empty_buffers[last_empty_buffer];
          o_b0 = vlib_get_buffer (vm, o_bi0);
          o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
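          /* start the output data past where an ethernet header would
             sit, so transport mode can later copy the L2 header in
             front of the IP header without moving the packet */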
          o_b0->current_data = sizeof (ethernet_header_t);
          ih0 = vlib_buffer_get_current (i_b0);
          vlib_prefetch_buffer_with_index (vm,
                                           empty_buffers[last_empty_buffer -
                                                         1], STORE);
          _vec_len (empty_buffers) = last_empty_buffer;
          to_next[0] = o_bi0;
          to_next += 1;

          /* add old buffer to the recycle list */
          vec_add1 (recycle, i_bi0);

          /* is ipv6 */
          if (PREDICT_FALSE
              ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
            {
              is_ipv6 = 1;
              ih6_0 = vlib_buffer_get_current (i_b0);
              ip_hdr_size = sizeof (ip6_header_t);
              next_hdr_type = IP_PROTOCOL_IPV6;
              oh6_0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);

              oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                ih6_0->ip6.ip_version_traffic_class_and_flow_label;
              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->ip6.src_address.as_u64[0] =
                ih6_0->ip6.src_address.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                ih6_0->ip6.src_address.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                ih6_0->ip6.dst_address.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                ih6_0->ip6.dst_address.as_u64[1];
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
              ip_proto = ih6_0->ip6.protocol;
            }
          else
            {
              is_ipv6 = 0;
              ip_hdr_size = sizeof (ip4_header_t);
              next_hdr_type = IP_PROTOCOL_IP_IN_IP;
              oh0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.tos = ih0->ip4.tos;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
              oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
              ip_proto = ih0->ip4.protocol;
            }

          if (PREDICT_TRUE (sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP4_INPUT;
              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP6_INPUT;
              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              transport_mode = 1;
              ethernet_header_t *ieh0, *oeh0;
              ieh0 = (ethernet_header_t *) i_b0->data;
              oeh0 = (ethernet_header_t *) o_b0->data;
              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
              vlib_buffer_advance (i_b0, ip_hdr_size);
              next_hdr_type = ip_proto;
              next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              o_b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
                vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
              vnet_buffer (o_b0)->output_features.bitmap =
                vnet_buffer (i_b0)->output_features.bitmap;
            }

          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);

          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {

              const int BLOCK_SIZE = 16;
              const int IV_SIZE = 16;
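              /* smallest number of whole cipher blocks that fits the
                 payload plus the 2-byte ESP footer:
                 1 + (len + 1) / 16 == ceil ((len + 2) / 16) */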
              int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;

              /* pad packet in input buffer */
              u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
              u8 i;
              u8 *padding =
                vlib_buffer_get_current (i_b0) + i_b0->current_length;
              i_b0->current_length = BLOCK_SIZE * blocks;
              for (i = 0; i < pad_bytes; ++i)
                {
                  padding[i] = i + 1;
                }
              f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
              f0->pad_length = pad_bytes;
              f0->next_header = next_hdr_type;

              o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
                BLOCK_SIZE * blocks + IV_SIZE;

              vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
                vnet_buffer (i_b0)->sw_if_index[VLIB_RX];

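              /* fresh random IV for every packet; it travels in the
                 clear immediately after the ESP header */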
              u8 iv[16];
              RAND_bytes (iv, sizeof (iv));

              clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
                           ip_hdr_size + sizeof (esp_header_t), iv, 16);

              esp_encrypt_aes_cbc (sa0->crypto_alg,
                                   (u8 *) vlib_buffer_get_current (i_b0),
                                   (u8 *) vlib_buffer_get_current (o_b0) +
                                   ip_hdr_size + sizeof (esp_header_t) +
                                   IV_SIZE, BLOCK_SIZE * blocks,
                                   sa0->crypto_key, iv);
            }

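          /* append the ICV: hmac_calc writes the (truncated) digest
             right after the encrypted payload and returns the number of
             bytes written, 0 when no integrity algorithm is set */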
          o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
                                             sa0->integ_key_len,
                                             (u8 *) o_esp0,
                                             o_b0->current_length -
                                             ip_hdr_size,
                                             vlib_buffer_get_current (o_b0) +
                                             o_b0->current_length,
                                             sa0->use_esn, sa0->seq_hi);


          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) -
                                      sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }

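          /* in transport mode rewind current_data to expose the
             ethernet header copied in front of the IP header above */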
          if (transport_mode)
            vlib_buffer_reset (o_b0);

        trace:
          if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              if (o_b0)
                {
                  o_b0->flags |= VLIB_BUFFER_IS_TRACED;
                  o_b0->trace_index = i_b0->trace_index;
                }
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, o_b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, o_bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

free_buffers_and_exit:
  vlib_buffer_free (vm, recycle, vec_len (recycle));
  vec_free (recycle);
  return from_frame->n_vectors;
}


/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp_encrypt_node) = {
  .function = esp_encrypt_node_fn,
  .name = "esp-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */