f46fa6e2161a021f251d9f1fc8762cec214ee0e3
[vpp.git] / src / vnet / ipsec / ah_decrypt.c
1 /*
2  * ah_decrypt.c : IPSec AH decrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ah.h>
25 #include <vnet/ipsec/ipsec_io.h>
26
/* Dispositions a packet can take after AH decrypt: drop on any error,
 * otherwise hand off to the IPv4 or IPv6 input node. */
#define foreach_ah_decrypt_next \
  _ (DROP, "error-drop")        \
  _ (IP4_INPUT, "ip4-input")    \
  _ (IP6_INPUT, "ip6-input")

/* Next-node index enum generated from the list above. */
#define _(v, s) AH_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_ah_decrypt_next
#undef _
    AH_DECRYPT_N_NEXT,
} ah_decrypt_next_t;
39
/* Error counters for the AH decrypt nodes; the string is the counter's
 * display name (e.g. in "show errors"). */
#define foreach_ah_decrypt_error                \
  _ (RX_PKTS, "AH pkts received")               \
  _ (DECRYPTION_FAILED, "AH decryption failed") \
  _ (INTEG_ERROR, "Integrity check failed")     \
  _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
  _ (DROP_FRAGMENTS, "IP fragments drop")       \
  _ (REPLAY, "SA replayed packet")

/* Error index enum generated from the list above. */
typedef enum
{
#define _(sym,str) AH_DECRYPT_ERROR_##sym,
  foreach_ah_decrypt_error
#undef _
    AH_DECRYPT_N_ERROR,
} ah_decrypt_error_t;
55
/* Counter display strings, indexed by ah_decrypt_error_t. */
static char *ah_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_ah_decrypt_error
#undef _
};
61
/* Per-packet trace record: the SA's integrity algorithm and the
 * packet's AH sequence number. */
typedef struct
{
  ipsec_integ_alg_t integ_alg;	/* integrity algorithm of the matched SA */
  u32 seq_num;			/* AH sequence number (host byte order) */
} ah_decrypt_trace_t;
67
68 /* packet trace format function */
69 static u8 *
70 format_ah_decrypt_trace (u8 * s, va_list * args)
71 {
72   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
73   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
74   ah_decrypt_trace_t *t = va_arg (*args, ah_decrypt_trace_t *);
75
76   s = format (s, "ah: integrity %U seq-num %d",
77               format_ipsec_integ_alg, t->integ_alg, t->seq_num);
78   return s;
79 }
80
/* Per-packet state carried from pass 1 to pass 2 of ah_decrypt_inline
 * (one entry per packet in the frame). */
typedef struct
{
  union
  {
    struct
    {
      /* IPv6 mutable header fields, saved before they are zeroed for
       * the ICV computation so pass 2 can restore them. */
      u8 hop_limit;
      u8 nexthdr;
      u32 ip_version_traffic_class_and_flow_label;
    };

    struct
    {
      /* IPv4 mutable header fields, saved before they are zeroed for
       * the ICV computation so pass 2 can restore them. */
      u8 ttl;
      u8 tos;
    };
  };
  u32 sa_index;			/* index of the SA this packet matched */
  u32 seq;			/* AH sequence number (host byte order) */
  u8 icv_padding_len;		/* padding after the ICV inside the AH header */
  u8 icv_size;			/* ICV length for the SA's integrity algorithm */
  u8 ip_hdr_size;		/* bytes of IP header preceding the AH header */
  i16 current_data;		/* b->current_data at pass-1 entry */
  u8 nexthdr_cached;		/* AH next-header, read before ICV zeroing */
} ah_decrypt_packet_data_t;
106
107 static_always_inline void
108 ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
109                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
110 {
111   u32 n_fail, n_ops = vec_len (ops);
112   vnet_crypto_op_t *op = ops;
113
114   if (n_ops == 0)
115     return;
116
117   n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
118
119   while (n_fail)
120     {
121       ASSERT (op - ops < n_ops);
122
123       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
124         {
125           u32 bi = op->user_data;
126           b[bi]->error = node->errors[AH_DECRYPT_ERROR_INTEG_ERROR];
127           nexts[bi] = AH_DECRYPT_NEXT_DROP;
128           n_fail--;
129         }
130       op++;
131     }
132 }
133
/* Shared worker for the ah4-decrypt / ah6-decrypt nodes.
 *
 * Two passes over the frame:
 *   pass 1: per packet, locate the AH header, run the anti-replay
 *           check, cache the AH next-header, save + zero the mutable
 *           IP header fields (AH integrity covers the IP header), and
 *           queue an HMAC-verify op into ptd->integ_ops;
 *   then all queued integrity ops are processed in one batch
 *   (ah_process_ops marks failures as drops);
 *   pass 2: per surviving packet, redo + advance the anti-replay
 *           window, strip the AH header, and either hand the inner
 *           packet to ip4/ip6-input (tunnel mode) or rebuild the
 *           outer IP header in place (transport mode).
 *
 * is_ip6 is a compile-time constant selecting the v4/v6 variant. */
always_inline uword
ah_decrypt_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                   int is_ip6)
{
  u32 n_left, *from;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  ah_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  from = vlib_frame_vector_args (from_frame);
  n_left = from_frame->n_vectors;
  ipsec_sa_t *sa0 = 0;
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;

  clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
  vlib_get_buffers (vm, from, b, n_left);
  /* ~0 (> AH_DECRYPT_N_NEXT) marks "not yet decided"; pass 2 skips
   * packets whose next index was already set (i.e. dropped) */
  clib_memset_u16 (nexts, -1, n_left);
  vec_reset_length (ptd->integ_ops);

  /* pass 1: parse, anti-replay check, queue integrity ops */
  while (n_left > 0)
    {
      ah_header_t *ah0;
      ip4_header_t *ih4;
      ip6_header_t *ih6;

      /* flush per-SA counters only when the SA changes between packets */
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_index != ~0)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);

          current_sa_bytes = current_sa_pkts = 0;
          vlib_prefetch_combined_counter (&ipsec_sa_counters,
                                          thread_index, current_sa_index);
        }

      pd->sa_index = current_sa_index;

      /* same pointer viewed as v4 or v6 header depending on is_ip6 */
      ih4 = vlib_buffer_get_current (b[0]);
      ih6 = vlib_buffer_get_current (b[0]);
      pd->current_data = b[0]->current_data;

      if (is_ip6)
        {
          ip6_ext_header_t *prev = NULL;
          ah0 =
            ip6_ext_header_find (vm, b[0], ih6, IP_PROTOCOL_IPSEC_AH, &prev);
          /* NOTE(review): assumes AH immediately follows the fixed IPv6
           * header (no extension headers before it) */
          pd->ip_hdr_size = sizeof (ip6_header_t);
          ASSERT ((u8 *) ah0 - (u8 *) ih6 == pd->ip_hdr_size);
        }
      else
        {
          /* fragments cannot be integrity-checked; drop them */
          if (ip4_is_fragment (ih4))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_DROP_FRAGMENTS];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto next;
            }
          pd->ip_hdr_size = ip4_header_bytes (ih4);
          ah0 = (ah_header_t *) ((u8 *) ih4 + pd->ip_hdr_size);
        }

      pd->seq = clib_host_to_net_u32 (ah0->seq_no);

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
          next[0] = AH_DECRYPT_NEXT_DROP;
          goto next;
        }

      current_sa_bytes += b[0]->current_length;
      current_sa_pkts += 1;

      pd->icv_size = sa0->integ_icv_size;
      /* cache next-header now: the AH header area is mutated below */
      pd->nexthdr_cached = ah0->nexthdr;
      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
        {
          /* ESN appends the high sequence bits after the payload; make
           * sure the buffer has tail room for them */
          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
                             pd->current_data + b[0]->current_length
                             + sizeof (u32) > buffer_data_size))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_NO_TAIL_SPACE];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto next;
            }

          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->integ_op_id);

          /* integrity check covers the IP header + AH + payload */
          op->src = (u8 *) ih4;
          op->len = b[0]->current_length;
          /* received ICV is stashed just before the IP header so the
           * computed digest can be compared in place */
          op->digest = (u8 *) ih4 - pd->icv_size;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->digest_len = pd->icv_size;
          op->key_index = sa0->integ_key_index;
          op->user_data = b - bufs;	/* buffer index within this frame */
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);

              /* append the implicit high sequence bits to the ICV input */
              op->len += sizeof (seq_hi);
              clib_memcpy (op->src + b[0]->current_length, &seq_hi,
                           sizeof (seq_hi));
            }
          /* move the received ICV out, then zero it: the ICV field is
           * treated as zero during computation (RFC 4302) */
          clib_memcpy (op->digest, ah0->auth_data, pd->icv_size);
          clib_memset (ah0->auth_data, 0, pd->icv_size);

          /* mutable IP header fields are zeroed for the computation;
           * save the originals so pass 2 can restore them */
          if (is_ip6)
            {
              pd->ip_version_traffic_class_and_flow_label =
                ih6->ip_version_traffic_class_and_flow_label;
              pd->hop_limit = ih6->hop_limit;
              ih6->ip_version_traffic_class_and_flow_label = 0x60;
              ih6->hop_limit = 0;
              pd->nexthdr = ah0->nexthdr;
              pd->icv_padding_len =
                ah_calc_icv_padding_len (pd->icv_size, 1 /* is_ipv6 */ );
            }
          else
            {
              pd->tos = ih4->tos;
              pd->ttl = ih4->ttl;
              ih4->tos = 0;
              ih4->ttl = 0;
              ih4->checksum = 0;
              pd->icv_padding_len =
                ah_calc_icv_padding_len (pd->icv_size, 0 /* is_ipv6 */ );
            }
        }

    next:
      n_left -= 1;
      pd += 1;
      next += 1;
      b += 1;
    }

  /* rewind cursors for pass 2 */
  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  vlib_node_increment_counter (vm, node->node_index, AH_DECRYPT_ERROR_RX_PKTS,
                               n_left);
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);

  /* batch-verify all queued ICVs; failures become drops in nexts[] */
  ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

  /* pass 2: advance anti-replay, strip AH, restore/forward */
  while (n_left > 0)
    {
      ip4_header_t *oh4;
      ip6_header_t *oh6;

      /* already resolved (dropped) in pass 1 or by the integrity check */
      if (next[0] < AH_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);

      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
        {
          /* redo the anti-replay check. see esp_decrypt for details */
          if (ipsec_sa_anti_replay_check (sa0, pd->seq))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto trace;
            }
          ipsec_sa_anti_replay_advance (sa0, pd->seq);
        }

      /* strip the IP + AH headers (including ICV and its padding) */
      u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
        + pd->icv_padding_len;
      vlib_buffer_advance (b[0], pd->ip_hdr_size + ah_hdr_len);
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {                       /* tunnel mode */
          /* the inner packet starts at the current position; dispatch
           * on the AH next-header value */
          if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
            next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
          else if (pd->nexthdr_cached == IP_PROTOCOL_IPV6)
            next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
          else
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_DECRYPTION_FAILED];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto trace;
            }
        }
      else
        {                       /* transport mode */
          /* rebuild the outer IP header directly in front of the
           * payload by copying the original header forward, then
           * restoring the saved mutable fields */
          if (is_ip6)
            {
              vlib_buffer_advance (b[0], -sizeof (ip6_header_t));
              oh6 = vlib_buffer_get_current (b[0]);
              /* memcpy only when source/destination cannot overlap */
              if (ah_hdr_len >= sizeof (ip6_header_t))
                clib_memcpy (oh6, b[0]->data + pd->current_data,
                             sizeof (ip6_header_t));
              else
                memmove (oh6, b[0]->data + pd->current_data,
                         sizeof (ip6_header_t));

              next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
              oh6->protocol = pd->nexthdr;
              oh6->hop_limit = pd->hop_limit;
              oh6->ip_version_traffic_class_and_flow_label =
                pd->ip_version_traffic_class_and_flow_label;
              oh6->payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain
                                      (vm, b[0]) - sizeof (ip6_header_t));
            }
          else
            {
              vlib_buffer_advance (b[0], -sizeof (ip4_header_t));
              oh4 = vlib_buffer_get_current (b[0]);
              /* memcpy only when source/destination cannot overlap */
              if (ah_hdr_len >= sizeof (ip4_header_t))
                clib_memcpy (oh4, b[0]->data + pd->current_data,
                             sizeof (ip4_header_t));
              else
                memmove (oh4, b[0]->data + pd->current_data,
                         sizeof (ip4_header_t));

              next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
              oh4->ip_version_and_header_length = 0x45;
              oh4->fragment_id = 0;
              oh4->flags_and_fragment_offset = 0;
              oh4->protocol = pd->nexthdr_cached;
              oh4->length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              oh4->ttl = pd->ttl;
              oh4->tos = pd->tos;
              oh4->checksum = ip4_header_checksum (oh4);
            }
        }

      vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          ah_decrypt_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->integ_alg = sa0->integ_alg;
          tr->seq_num = pd->seq;
        }

      n_left -= 1;
      pd += 1;
      next += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
404
/* IPv4 AH decrypt node entry point: thin wrapper selecting the v4
 * variant of the shared inline worker. */
VLIB_NODE_FN (ah4_decrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}
411
/* *INDENT-OFF* */
/* Graph-node registration for "ah4-decrypt". */
VLIB_REGISTER_NODE (ah4_decrypt_node) = {
  .name = "ah4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [AH_DECRYPT_NEXT_##s] = n,
    foreach_ah_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
430
/* IPv6 AH decrypt node entry point: thin wrapper selecting the v6
 * variant of the shared inline worker. */
VLIB_NODE_FN (ah6_decrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}
437
/* *INDENT-OFF* */
/* Graph-node registration for "ah6-decrypt". */
VLIB_REGISTER_NODE (ah6_decrypt_node) = {
  .name = "ah6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [AH_DECRYPT_NEXT_##s] = n,
    foreach_ah_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */
456
457 /*
458  * fd.io coding-style-patch-verification: ON
459  *
460  * Local Variables:
461  * eval: (c-set-style "gnu")
462  * End:
463  */