/*
 * ipsec: esp-encrypt and esp-decrypt cleanup
 * vpp.git: src/vnet/ipsec/esp_decrypt.c
 */
1 /*
2  * esp_decrypt.c : IPSec ESP decrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ipsec_io.h>
25
/* Dispositions a decrypted packet can take.  Each X-macro entry expands to
 * both an ESP_DECRYPT_NEXT_* enum value (here) and a next-node name in the
 * VLIB_REGISTER_NODE blocks below, so the two always stay in sync. */
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(IPSEC_GRE_INPUT, "ipsec-gre-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,		/* number of next nodes */
} esp_decrypt_next_t;
39
40
/* Per-node error counters; each entry yields an ESP_DECRYPT_ERROR_* enum
 * value and its display string (via esp_decrypt_error_strings below). */
#define foreach_esp_decrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(NO_BUFFER, "No buffer (packed dropped)")         \
 _(DECRYPTION_FAILED, "ESP decryption failed")      \
 _(INTEG_ERROR, "Integrity check failed")           \
 _(REPLAY, "SA replayed packet")                    \
 _(NOT_IP, "Not IP packet (dropped)")


typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,	/* number of error counters */
} esp_decrypt_error_t;
57
/* Display strings for the error counters, indexed by esp_decrypt_error_t;
 * referenced by .error_strings in the node registrations below. */
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
63
/* Per-packet trace record; filled in esp_decrypt_inline and rendered by
 * format_esp_decrypt_trace. */
typedef struct
{
  ipsec_crypto_alg_t crypto_alg;	/* SA's encryption algorithm */
  ipsec_integ_alg_t integ_alg;		/* SA's integrity algorithm */
} esp_decrypt_trace_t;
69
70 /* packet trace format function */
71 static u8 *
72 format_esp_decrypt_trace (u8 * s, va_list * args)
73 {
74   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
75   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
76   esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
77
78   s = format (s, "esp: crypto %U integrity %U",
79               format_ipsec_crypto_alg, t->crypto_alg,
80               format_ipsec_integ_alg, t->integ_alg);
81   return s;
82 }
83
/* CBC-decrypt in_len bytes from 'in' into 'out' using the per-thread
 * OpenSSL EVP context.  'key' is the SA's crypto key and 'iv' the
 * per-packet IV taken from the start of the ESP payload.
 * NOTE(review): EVP return codes are not checked, so the caller cannot
 * observe a cipher failure here — presumably caught later by the
 * integrity check; confirm this is intentional. */
always_inline void
esp_decrypt_cbc (ipsec_crypto_alg_t alg,
                 u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
  ipsec_proto_main_t *em = &ipsec_proto_main;
  u32 thread_index = vlib_get_thread_index ();
/* OpenSSL >= 1.1.0 made EVP_CIPHER_CTX opaque (pointer in per-thread
 * data); earlier versions embed the struct directly. */
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].decrypt_ctx;
#else
  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].decrypt_ctx);
#endif
  const EVP_CIPHER *cipher = NULL;
  int out_len;

  ASSERT (alg < IPSEC_CRYPTO_N_ALG);

  /* .type == 0 means no EVP cipher is configured for this algorithm */
  if (PREDICT_FALSE (em->ipsec_proto_main_crypto_algs[alg].type == 0))
    return;

  /* Only re-select the cipher when the algorithm changed since the last
   * call on this thread; passing cipher == NULL to EVP_DecryptInit_ex
   * reuses the context's current cipher with the new key/IV. */
  if (PREDICT_FALSE
      (alg != em->per_thread_data[thread_index].last_decrypt_alg))
    {
      cipher = em->ipsec_proto_main_crypto_algs[alg].type;
      em->per_thread_data[thread_index].last_decrypt_alg = alg;
    }

  EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);

  EVP_DecryptUpdate (ctx, out, &out_len, in, in_len);
  EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
}
115
116 always_inline uword
117 esp_decrypt_inline (vlib_main_t * vm,
118                     vlib_node_runtime_t * node, vlib_frame_t * from_frame,
119                     int is_ip6)
120 {
121   ipsec_main_t *im = &ipsec_main;
122   ipsec_proto_main_t *em = &ipsec_proto_main;
123   u32 *from = vlib_frame_vector_args (from_frame);
124   u32 n_left_from = from_frame->n_vectors;
125   u32 new_bufs[VLIB_FRAME_SIZE];
126   vlib_buffer_t *i_bufs[VLIB_FRAME_SIZE], **ib = i_bufs;
127   vlib_buffer_t *o_bufs[VLIB_FRAME_SIZE], **ob = o_bufs;
128   u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
129   u32 n_alloc, thread_index = vm->thread_index;
130
131   n_alloc = vlib_buffer_alloc (vm, new_bufs, n_left_from);
132   if (n_alloc != n_left_from)
133     {
134       vlib_node_increment_counter (vm, node->node_index,
135                                    ESP_DECRYPT_ERROR_NO_BUFFER,
136                                    n_left_from - n_alloc);
137       if (n_alloc == 0)
138         goto done;
139       n_left_from = n_alloc;
140     }
141
142   vlib_get_buffers (vm, from, ib, n_left_from);
143   vlib_get_buffers (vm, new_bufs, ob, n_left_from);
144
145   while (n_left_from > 0)
146     {
147       esp_header_t *esp0;
148       ipsec_sa_t *sa0;
149       u32 sa_index0 = ~0;
150       u32 seq;
151       ip4_header_t *ih4 = 0, *oh4 = 0;
152       ip6_header_t *ih6 = 0, *oh6 = 0;
153       u8 tunnel_mode = 1;
154
155       next[0] = ESP_DECRYPT_NEXT_DROP;
156
157       esp0 = vlib_buffer_get_current (ib[0]);
158       sa_index0 = vnet_buffer (ib[0])->ipsec.sad_index;
159       sa0 = pool_elt_at_index (im->sad, sa_index0);
160       seq = clib_host_to_net_u32 (esp0->seq);
161
162       /* anti-replay check */
163       if (sa0->use_anti_replay)
164         {
165           int rv = 0;
166
167           if (PREDICT_TRUE (sa0->use_esn))
168             rv = esp_replay_check_esn (sa0, seq);
169           else
170             rv = esp_replay_check (sa0, seq);
171
172           if (PREDICT_FALSE (rv))
173             {
174               u32 tmp, off = n_alloc - n_left_from;
175               /* send original packet to drop node */
176               tmp = from[off];
177               from[off] = new_bufs[off];
178               new_bufs[off] = tmp;
179               ib[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
180               next[0] = ESP_DECRYPT_NEXT_DROP;
181               goto trace;
182             }
183         }
184
185       vlib_increment_combined_counter
186         (&ipsec_sa_counters, thread_index, sa_index0,
187          1, ib[0]->current_length);
188
189       if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
190         {
191           u8 sig[64];
192           int icv_size =
193             em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
194           clib_memset (sig, 0, sizeof (sig));
195           u8 *icv =
196             vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
197             icv_size;
198           ib[0]->current_length -= icv_size;
199
200           hmac_calc (sa0->integ_alg, sa0->integ_key.data,
201                      sa0->integ_key.len, (u8 *) esp0,
202                      ib[0]->current_length, sig, sa0->use_esn, sa0->seq_hi);
203
204           if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
205             {
206               u32 tmp, off = n_alloc - n_left_from;
207               /* send original packet to drop node */
208               tmp = from[off];
209               from[off] = new_bufs[off];
210               new_bufs[off] = tmp;
211               ib[0]->error = node->errors[ESP_DECRYPT_ERROR_INTEG_ERROR];
212               next[0] = ESP_DECRYPT_NEXT_DROP;
213               goto trace;
214             }
215         }
216
217       if (PREDICT_TRUE (sa0->use_anti_replay))
218         {
219           if (PREDICT_TRUE (sa0->use_esn))
220             esp_replay_advance_esn (sa0, seq);
221           else
222             esp_replay_advance (sa0, seq);
223         }
224
225       if ((sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
226            sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256) ||
227           (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
228            sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
229         {
230           const int BLOCK_SIZE =
231             em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;;
232           const int IV_SIZE =
233             em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
234           esp_footer_t *f0;
235           u8 ip_hdr_size = 0;
236
237           int blocks =
238             (ib[0]->current_length - sizeof (esp_header_t) -
239              IV_SIZE) / BLOCK_SIZE;
240
241           ob[0]->current_data = sizeof (ethernet_header_t);
242
243           /* transport mode */
244           if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
245             {
246               tunnel_mode = 0;
247
248               if (is_ip6)
249                 {
250                   ip_hdr_size = sizeof (ip6_header_t);
251                   ih6 = (ip6_header_t *) ((u8 *) esp0 - ip_hdr_size);
252                   oh6 = vlib_buffer_get_current (ob[0]);
253                 }
254               else
255                 {
256                   ip_hdr_size = sizeof (ip4_header_t);
257                   if (sa0->udp_encap)
258                     ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size -
259                                             sizeof (udp_header_t));
260                   else
261                     ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size);
262                   oh4 = vlib_buffer_get_current (ob[0]);
263                 }
264             }
265
266           esp_decrypt_cbc (sa0->crypto_alg,
267                            esp0->data + IV_SIZE,
268                            (u8 *) vlib_buffer_get_current (ob[0]) +
269                            ip_hdr_size, BLOCK_SIZE * blocks,
270                            sa0->crypto_key.data, esp0->data);
271
272           ob[0]->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
273           ob[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
274           f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (ob[0]) +
275                                  ob[0]->current_length);
276           ob[0]->current_length -= f0->pad_length;
277
278           /* tunnel mode */
279           if (PREDICT_TRUE (tunnel_mode))
280             {
281               if (PREDICT_TRUE (f0->next_header == IP_PROTOCOL_IP_IN_IP))
282                 {
283                   next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
284                   oh4 = vlib_buffer_get_current (ob[0]);
285                 }
286               else if (f0->next_header == IP_PROTOCOL_IPV6)
287                 next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
288               else
289                 {
290                   vlib_node_increment_counter (vm, node->node_index,
291                                                ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
292                                                1);
293                   ob[0] = 0;
294                   goto trace;
295                 }
296             }
297           /* transport mode */
298           else
299             {
300               u32 len = vlib_buffer_length_in_chain (vm, ob[0]);
301               if (is_ip6)
302                 {
303                   next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
304                   oh6->ip_version_traffic_class_and_flow_label =
305                     ih6->ip_version_traffic_class_and_flow_label;
306                   oh6->protocol = f0->next_header;
307                   oh6->hop_limit = ih6->hop_limit;
308                   oh6->src_address.as_u64[0] = ih6->src_address.as_u64[0];
309                   oh6->src_address.as_u64[1] = ih6->src_address.as_u64[1];
310                   oh6->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
311                   oh6->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
312                   len -= sizeof (ip6_header_t);
313                   oh6->payload_length = clib_host_to_net_u16 (len);
314                 }
315               else
316                 {
317                   next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
318                   oh4->ip_version_and_header_length = 0x45;
319                   oh4->tos = ih4->tos;
320                   oh4->fragment_id = 0;
321                   oh4->flags_and_fragment_offset = 0;
322                   oh4->ttl = ih4->ttl;
323                   oh4->protocol = f0->next_header;
324                   oh4->src_address.as_u32 = ih4->src_address.as_u32;
325                   oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
326                   oh4->length = clib_host_to_net_u16 (len);
327                   oh4->checksum = ip4_header_checksum (oh4);
328                 }
329             }
330
331           /* for IPSec-GRE tunnel next node is ipsec-gre-input */
332           if (PREDICT_FALSE
333               ((vnet_buffer (ib[0])->ipsec.flags) &
334                IPSEC_FLAG_IPSEC_GRE_TUNNEL))
335             next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
336
337           vnet_buffer (ob[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
338           vnet_buffer (ob[0])->sw_if_index[VLIB_RX] =
339             vnet_buffer (ib[0])->sw_if_index[VLIB_RX];
340         }
341
342     trace:
343       if (PREDICT_FALSE (ib[0]->flags & VLIB_BUFFER_IS_TRACED))
344         {
345           if (ob[0])
346             {
347               ob[0]->flags |= VLIB_BUFFER_IS_TRACED;
348               ob[0]->trace_index = ib[0]->trace_index;
349               esp_decrypt_trace_t *tr =
350                 vlib_add_trace (vm, node, ob[0], sizeof (*tr));
351               tr->crypto_alg = sa0->crypto_alg;
352               tr->integ_alg = sa0->integ_alg;
353             }
354         }
355
356       /* next */
357       n_left_from -= 1;
358       ib += 1;
359       ob += 1;
360       next += 1;
361     }
362
363   vlib_node_increment_counter (vm, node->node_index,
364                                ESP_DECRYPT_ERROR_RX_PKTS, n_alloc);
365
366   vlib_buffer_enqueue_to_next (vm, node, new_bufs, nexts, n_alloc);
367 done:
368   vlib_buffer_free (vm, from, from_frame->n_vectors);
369   return n_alloc;
370 }
371
/* esp4-decrypt node function: ESP decryption for IPv4 frames. */
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}
378
/* Node registration for esp4-decrypt; next nodes and error strings come
 * from the foreach_ X-macros above. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
397
/* esp6-decrypt node function: ESP decryption for IPv6 frames. */
VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}
404
/* Node registration for esp6-decrypt; mirrors esp4-decrypt except for the
 * node name. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
423
424 /*
425  * fd.io coding-style-patch-verification: ON
426  *
427  * Local Variables:
428  * eval: (c-set-style "gnu")
429  * End:
430  */