crypto: introduce crypto infra
[vpp.git] / src / vnet / ipsec / esp_decrypt.c
1 /*
2  * esp_decrypt.c : IPSec ESP decrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ipsec_io.h>
25
/* Next nodes a packet can be dispatched to after decryption.  Expands
 * into the esp_decrypt_next_t enum below and into the next_nodes tables
 * of the esp4/esp6 node registrations at the bottom of this file. */
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(IPSEC_GRE_INPUT, "ipsec-gre-input")

/* Next-node indices; ESP_DECRYPT_N_NEXT is the number of next nodes. */
#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
39
40
/* Error counters for the ESP decrypt nodes together with their
 * human-readable strings.  Expands into the esp_decrypt_error_t enum
 * and the esp_decrypt_error_strings[] table below.
 * Fix: "packed dropped" -> "packet dropped" (typo in counter text). */
#define foreach_esp_decrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(NO_BUFFER, "No buffer (packet dropped)")         \
 _(DECRYPTION_FAILED, "ESP decryption failed")      \
 _(INTEG_ERROR, "Integrity check failed")           \
 _(REPLAY, "SA replayed packet")                    \
 _(NOT_IP, "Not IP packet (dropped)")


/* Error codes; ESP_DECRYPT_N_ERROR is the number of error counters. */
typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
57
/* Human-readable counter names, indexed by esp_decrypt_error_t; used by
 * the node registrations below for "show errors" output. */
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
63
/* Per-packet trace record: which SA algorithms processed the packet. */
typedef struct
{
  ipsec_crypto_alg_t crypto_alg;	/* cipher used to decrypt the payload */
  ipsec_integ_alg_t integ_alg;	/* integrity (HMAC) algorithm of the SA */
} esp_decrypt_trace_t;
69
70 /* packet trace format function */
71 static u8 *
72 format_esp_decrypt_trace (u8 * s, va_list * args)
73 {
74   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
75   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
76   esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
77
78   s = format (s, "esp: crypto %U integrity %U",
79               format_ipsec_crypto_alg, t->crypto_alg,
80               format_ipsec_integ_alg, t->integ_alg);
81   return s;
82 }
83
84 always_inline void
85 esp_decrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
86                  u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
87 {
88   ipsec_main_t *im = &ipsec_main;
89   ipsec_main_crypto_alg_t *a;
90   vnet_crypto_op_t _op, *op = &_op;
91
92   ASSERT (alg < IPSEC_CRYPTO_N_ALG);
93
94   a = &im->crypto_algs[alg];
95
96   if (PREDICT_FALSE (a->dec_op_type == VNET_CRYPTO_OP_NONE))
97     return;
98
99   op->op = a->dec_op_type;
100   op->iv = iv;
101   op->src = in;
102   op->dst = out;
103   op->len = in_len;
104   op->key = key;
105
106   vnet_crypto_process_ops (vm, op, 1);
107 }
108
/* Shared worker for the esp4-decrypt / esp6-decrypt nodes.
 *
 * Strategy: one fresh output buffer is allocated per input packet up
 * front; decrypted plaintext is written into the output buffer and the
 * output buffers are enqueued, while ALL original (ciphertext) buffers
 * are freed at the end.  On a per-packet error the entries in from[]
 * and new_bufs[] are swapped so the ORIGINAL packet goes to the drop
 * node and the unused fresh buffer is freed instead.
 *
 * Returns the number of buffers enqueued (n_alloc). */
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6)
{
  ipsec_main_t *im = &ipsec_main;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;
  u32 new_bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t *i_bufs[VLIB_FRAME_SIZE], **ib = i_bufs;	/* input (encrypted) */
  vlib_buffer_t *o_bufs[VLIB_FRAME_SIZE], **ob = o_bufs;	/* output (decrypted) */
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 n_alloc, thread_index = vm->thread_index;

  /* Allocate one output buffer per input packet; on a shortfall count
   * the misses and process only as many packets as buffers obtained. */
  n_alloc = vlib_buffer_alloc (vm, new_bufs, n_left_from);
  if (n_alloc != n_left_from)
    {
      vlib_node_increment_counter (vm, node->node_index,
                                   ESP_DECRYPT_ERROR_NO_BUFFER,
                                   n_left_from - n_alloc);
      if (n_alloc == 0)
        goto done;
      n_left_from = n_alloc;
    }

  vlib_get_buffers (vm, from, ib, n_left_from);
  vlib_get_buffers (vm, new_bufs, ob, n_left_from);

  while (n_left_from > 0)
    {
      esp_header_t *esp0;
      ipsec_sa_t *sa0;
      u32 sa_index0 = ~0;
      u32 seq;
      ip4_header_t *ih4 = 0, *oh4 = 0;
      ip6_header_t *ih6 = 0, *oh6 = 0;
      u8 tunnel_mode = 1;

      /* Default disposition is drop; overwritten on success below.
       * NOTE(review): if the SA's crypto_alg is outside the CBC ranges
       * checked further down, the packet falls through to here with
       * next=DROP and the (uninitialized) output buffer is the one
       * dropped -- no error counter is bumped in that case. */
      next[0] = ESP_DECRYPT_NEXT_DROP;

      esp0 = vlib_buffer_get_current (ib[0]);
      sa_index0 = vnet_buffer (ib[0])->ipsec.sad_index;
      sa0 = pool_elt_at_index (im->sad, sa_index0);
      /* byte-swap of the wire-order sequence number (the host<->net
       * helpers are symmetric) */
      seq = clib_host_to_net_u32 (esp0->seq);

      /* anti-replay check */
      if (sa0->use_anti_replay)
        {
          int rv = 0;

          if (PREDICT_TRUE (sa0->use_esn))
            rv = esp_replay_check_esn (sa0, seq);
          else
            rv = esp_replay_check (sa0, seq);

          if (PREDICT_FALSE (rv))
            {
              /* replayed: swap original and fresh buffer so the
               * original packet is dropped and the fresh one freed */
              u32 tmp, off = n_alloc - n_left_from;
              /* send original packet to drop node */
              tmp = from[off];
              from[off] = new_bufs[off];
              new_bufs[off] = tmp;
              ib[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
              next[0] = ESP_DECRYPT_NEXT_DROP;
              goto trace;
            }
        }

      /* per-SA packet/byte counters */
      vlib_increment_combined_counter
        (&ipsec_sa_counters, thread_index, sa_index0,
         1, ib[0]->current_length);

      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
        {
          u8 sig[64];
          int icv_size = im->integ_algs[sa0->integ_alg].trunc_size;
          clib_memset (sig, 0, sizeof (sig));
          /* ICV sits at the tail of the packet; chop it off before
           * computing the HMAC over the remainder */
          u8 *icv = vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
            icv_size;
          ib[0]->current_length -= icv_size;

          hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
                     sa0->integ_key.len, (u8 *) esp0,
                     ib[0]->current_length, sig, sa0->use_esn, sa0->seq_hi);

          if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
            {
              u32 tmp, off = n_alloc - n_left_from;
              /* send original packet to drop node */
              tmp = from[off];
              from[off] = new_bufs[off];
              new_bufs[off] = tmp;
              ib[0]->error = node->errors[ESP_DECRYPT_ERROR_INTEG_ERROR];
              next[0] = ESP_DECRYPT_NEXT_DROP;
              goto trace;
            }
        }

      /* advance the replay window only after the ICV verified */
      if (PREDICT_TRUE (sa0->use_anti_replay))
        {
          if (PREDICT_TRUE (sa0->use_esn))
            esp_replay_advance_esn (sa0, seq);
          else
            esp_replay_advance (sa0, seq);
        }

      /* only the CBC cipher families (AES-CBC, DES/3DES-CBC) are
       * handled by this node */
      if ((sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
           sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256) ||
          (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
           sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
        {
          const int BLOCK_SIZE = im->crypto_algs[sa0->crypto_alg].block_size;
          const int IV_SIZE = im->crypto_algs[sa0->crypto_alg].iv_size;
          esp_footer_t *f0;
          u8 ip_hdr_size = 0;

          /* whole cipher blocks between the IV and the (removed) ICV */
          int blocks =
            (ib[0]->current_length - sizeof (esp_header_t) -
             IV_SIZE) / BLOCK_SIZE;

          /* leave room in front for a future ethernet rewrite */
          ob[0]->current_data = sizeof (ethernet_header_t);

          /* transport mode */
          if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              tunnel_mode = 0;

              /* locate the original (outer) IP header in front of the
               * ESP header so it can be copied into the output */
              if (is_ip6)
                {
                  ip_hdr_size = sizeof (ip6_header_t);
                  ih6 = (ip6_header_t *) ((u8 *) esp0 - ip_hdr_size);
                  oh6 = vlib_buffer_get_current (ob[0]);
                }
              else
                {
                  ip_hdr_size = sizeof (ip4_header_t);
                  /* with UDP encapsulation a udp header sits between
                   * the IP header and ESP */
                  if (sa0->udp_encap)
                    ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size -
                                            sizeof (udp_header_t));
                  else
                    ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size);
                  oh4 = vlib_buffer_get_current (ob[0]);
                }
            }

          /* decrypt ciphertext (after the IV) into the output buffer,
           * leaving ip_hdr_size bytes of headroom in transport mode */
          esp_decrypt_cbc (vm, sa0->crypto_alg,
                           esp0->data + IV_SIZE,
                           (u8 *) vlib_buffer_get_current (ob[0]) +
                           ip_hdr_size, BLOCK_SIZE * blocks,
                           sa0->crypto_key.data, esp0->data);

          /* strip the 2-byte ESP trailer (pad_length, next_header),
           * then the padding itself once pad_length is known.
           * NOTE(review): plain assignment (not |=) intentionally
           * resets all flags on the freshly allocated buffer. */
          ob[0]->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
          ob[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (ob[0]) +
                                 ob[0]->current_length);
          ob[0]->current_length -= f0->pad_length;

          /* tunnel mode */
          if (PREDICT_TRUE (tunnel_mode))
            {
              /* inner packet starts at the decrypted payload; dispatch
               * on the trailer's next_header protocol */
              if (PREDICT_TRUE (f0->next_header == IP_PROTOCOL_IP_IN_IP))
                {
                  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
                  oh4 = vlib_buffer_get_current (ob[0]);
                }
              else if (f0->next_header == IP_PROTOCOL_IPV6)
                next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  /* unknown inner protocol: count it and skip the
                   * output buffer entirely (ob[0]=0 suppresses the
                   * trace/enqueue of this slot's fresh buffer) */
                  vlib_node_increment_counter (vm, node->node_index,
                                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                               1);
                  ob[0] = 0;
                  goto trace;
                }
            }
          /* transport mode */
          else
            {
              /* rebuild the IP header in front of the decrypted
               * payload from the original outer header */
              u32 len = vlib_buffer_length_in_chain (vm, ob[0]);
              if (is_ip6)
                {
                  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
                  oh6->ip_version_traffic_class_and_flow_label =
                    ih6->ip_version_traffic_class_and_flow_label;
                  oh6->protocol = f0->next_header;
                  oh6->hop_limit = ih6->hop_limit;
                  oh6->src_address.as_u64[0] = ih6->src_address.as_u64[0];
                  oh6->src_address.as_u64[1] = ih6->src_address.as_u64[1];
                  oh6->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
                  oh6->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
                  /* payload_length excludes the IPv6 header itself */
                  len -= sizeof (ip6_header_t);
                  oh6->payload_length = clib_host_to_net_u16 (len);
                }
              else
                {
                  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
                  oh4->ip_version_and_header_length = 0x45;
                  oh4->tos = ih4->tos;
                  oh4->fragment_id = 0;
                  oh4->flags_and_fragment_offset = 0;
                  oh4->ttl = ih4->ttl;
                  oh4->protocol = f0->next_header;
                  oh4->src_address.as_u32 = ih4->src_address.as_u32;
                  oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
                  /* IPv4 total length includes the header */
                  oh4->length = clib_host_to_net_u16 (len);
                  oh4->checksum = ip4_header_checksum (oh4);
                }
            }

          /* for IPSec-GRE tunnel next node is ipsec-gre-input */
          if (PREDICT_FALSE
              ((vnet_buffer (ib[0])->ipsec.flags) &
               IPSEC_FLAG_IPSEC_GRE_TUNNEL))
            next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;

          /* output buffer inherits the RX interface; TX is reset */
          vnet_buffer (ob[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
          vnet_buffer (ob[0])->sw_if_index[VLIB_RX] =
            vnet_buffer (ib[0])->sw_if_index[VLIB_RX];
        }

    trace:
      /* propagate tracing from the input to the output buffer */
      if (PREDICT_FALSE (ib[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          if (ob[0])
            {
              ob[0]->flags |= VLIB_BUFFER_IS_TRACED;
              ob[0]->trace_index = ib[0]->trace_index;
              esp_decrypt_trace_t *tr =
                vlib_add_trace (vm, node, ob[0], sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }

      /* next */
      n_left_from -= 1;
      ib += 1;
      ob += 1;
      next += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_alloc);

  /* enqueue the output buffers; from[] now holds the buffers to free
   * (originals, or swapped-in fresh buffers on per-packet errors) */
  vlib_buffer_enqueue_to_next (vm, node, new_bufs, nexts, n_alloc);
done:
  vlib_buffer_free (vm, from, from_frame->n_vectors);
  return n_alloc;
}
359
360 VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
361                                   vlib_node_runtime_t * node,
362                                   vlib_frame_t * from_frame)
363 {
364   return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
365 }
366
/* *INDENT-OFF* */
/* Graph node registration for IPv4 ESP decryption ("esp4-decrypt"). */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  /* next-node table generated from foreach_esp_decrypt_next */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
385
386 VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
387                                   vlib_node_runtime_t * node,
388                                   vlib_frame_t * from_frame)
389 {
390   return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
391 }
392
/* *INDENT-OFF* */
/* Graph node registration for IPv6 ESP decryption ("esp6-decrypt"). */
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  /* next-node table generated from foreach_esp_decrypt_next */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
411
412 /*
413  * fd.io coding-style-patch-verification: ON
414  *
415  * Local Variables:
416  * eval: (c-set-style "gnu")
417  * End:
418  */