ipsec: Fix setting the hi-sequence number for decrypt
[vpp.git] / src / vnet / ipsec / ah_decrypt.c
1 /*
2  * ah_decrypt.c : IPSec AH decrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ah.h>
25 #include <vnet/ipsec/ipsec_io.h>
26
/* Dispositions a packet can take after AH decrypt processing: drop,
 * continue into the IPv4/IPv6 input graph, or hand off to the thread
 * that owns the SA. Expanded via X-macro into ah_decrypt_next_t. */
#define foreach_ah_decrypt_next                 \
  _(DROP, "error-drop")                         \
  _(IP4_INPUT, "ip4-input")                     \
  _(IP6_INPUT, "ip6-input")                     \
  _(HANDOFF, "handoff")

#define _(v, s) AH_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_ah_decrypt_next
#undef _
    AH_DECRYPT_N_NEXT,
} ah_decrypt_next_t;
40
/* Error/counter definitions for the AH decrypt nodes; each entry becomes
 * an enum value and a matching counter string via the X-macro below. */
#define foreach_ah_decrypt_error                \
  _ (RX_PKTS, "AH pkts received")               \
  _ (DECRYPTION_FAILED, "AH decryption failed") \
  _ (INTEG_ERROR, "Integrity check failed")     \
  _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
  _ (DROP_FRAGMENTS, "IP fragments drop")       \
  _ (REPLAY, "SA replayed packet")

typedef enum
{
#define _(sym,str) AH_DECRYPT_ERROR_##sym,
  foreach_ah_decrypt_error
#undef _
    AH_DECRYPT_N_ERROR,
} ah_decrypt_error_t;
56
/* Human-readable counter strings, one per ah_decrypt_error_t entry,
 * in the same order as the enum (both expand foreach_ah_decrypt_error). */
static char *ah_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_ah_decrypt_error
#undef _
};
62
/* Per-packet trace record; rendered by format_ah_decrypt_trace. */
typedef struct
{
  ipsec_integ_alg_t integ_alg;  /* SA's integrity algorithm */
  u32 seq_num;                  /* low 32 bits of the AH sequence number */
} ah_decrypt_trace_t;
68
69 /* packet trace format function */
70 static u8 *
71 format_ah_decrypt_trace (u8 * s, va_list * args)
72 {
73   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75   ah_decrypt_trace_t *t = va_arg (*args, ah_decrypt_trace_t *);
76
77   s = format (s, "ah: integrity %U seq-num %d",
78               format_ipsec_integ_alg, t->integ_alg, t->seq_num);
79   return s;
80 }
81
/* Per-packet state computed in the first pass of ah_decrypt_inline and
 * consumed in the second pass (after the batched integrity checks). */
typedef struct
{
  union
  {
    /* saved mutable IPv6 header fields (zeroed while the ICV is computed,
     * restored when the header is rebuilt in transport mode) */
    struct
    {
      u8 hop_limit;
      u8 nexthdr;
      u32 ip_version_traffic_class_and_flow_label;
    };

    /* saved mutable IPv4 header fields (same purpose as above) */
    struct
    {
      u8 ttl;
      u8 tos;
    };
  };
  u32 sa_index;         /* SA this packet matched */
  u32 seq;              /* low 32 bits of the AH sequence number (host order) */
  u32 seq_hi;           /* high 32 bits picked by the anti-replay pre-check (ESN) */
  u8 icv_padding_len;   /* pad after the ICV, from ah_calc_icv_padding_len() */
  u8 icv_size;          /* ICV length for the SA's integrity algorithm */
  u8 ip_hdr_size;       /* length of the IP header preceding the AH header */
  i16 current_data;     /* b->current_data at parse time (start of IP header) */
  u8 nexthdr_cached;    /* AH next-header field, saved before ICV zeroing */
} ah_decrypt_packet_data_t;
108
109 static_always_inline void
110 ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
111                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
112 {
113   u32 n_fail, n_ops = vec_len (ops);
114   vnet_crypto_op_t *op = ops;
115
116   if (n_ops == 0)
117     return;
118
119   n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
120
121   while (n_fail)
122     {
123       ASSERT (op - ops < n_ops);
124
125       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
126         {
127           u32 bi = op->user_data;
128           b[bi]->error = node->errors[AH_DECRYPT_ERROR_INTEG_ERROR];
129           nexts[bi] = AH_DECRYPT_NEXT_DROP;
130           n_fail--;
131         }
132       op++;
133     }
134 }
135
/* Shared worker for the ah4-/ah6-decrypt nodes.
 *
 * Processes one frame in two passes:
 *  - pass 1: per packet, locate the AH header, run the anti-replay
 *    pre-check (which also picks the candidate ESN hi-sequence into
 *    pd->seq_hi), queue one integrity-check crypto op, and stash the
 *    mutable IP header fields (which must be zero during ICV
 *    computation) in pkt_data[];
 *  - the queued ops are then verified in one batch (ah_process_ops);
 *  - pass 2: for packets that passed, re-check and advance the
 *    anti-replay window, strip the AH header and rebuild the packet
 *    for tunnel or transport mode before handing it to ip[46]-input.
 *
 * @param is_ip6  compile-time selector: 0 = IPv4 outer header, 1 = IPv6
 * @return        number of packets processed (frame vector count)
 */
always_inline uword
ah_decrypt_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                   int is_ip6)
{
  u32 n_left, *from;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  /* per-packet state carried from pass 1 to pass 2 */
  ah_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  from = vlib_frame_vector_args (from_frame);
  n_left = from_frame->n_vectors;
  ipsec_sa_t *sa0 = 0;
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;

  clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
  vlib_get_buffers (vm, from, b, n_left);
  /* ~0 marks "not yet decided"; pass 2 only touches such packets */
  clib_memset_u16 (nexts, -1, n_left);
  vec_reset_length (ptd->integ_ops);

  /* pass 1: parse, anti-replay pre-check, queue integrity ops */
  while (n_left > 0)
    {
      ah_header_t *ah0;
      ip4_header_t *ih4;
      ip6_header_t *ih6;

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          /* SA changed: flush counters accumulated for the previous SA */
          if (current_sa_index != ~0)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = ipsec_sa_get (current_sa_index);

          current_sa_bytes = current_sa_pkts = 0;
          vlib_prefetch_combined_counter (&ipsec_sa_counters,
                                          thread_index, current_sa_index);
        }

      if (PREDICT_FALSE (~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_TRUE (thread_index != sa0->thread_index))
        {
          /* SA is owned by another thread: hand the packet off to it */
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          next[0] = AH_DECRYPT_NEXT_HANDOFF;
          goto next;
        }

      pd->sa_index = current_sa_index;

      ih4 = vlib_buffer_get_current (b[0]);
      ih6 = vlib_buffer_get_current (b[0]);
      pd->current_data = b[0]->current_data;

      if (is_ip6)
        {
          ip6_ext_header_t *prev = NULL;
          ah0 =
            ip6_ext_header_find (vm, b[0], ih6, IP_PROTOCOL_IPSEC_AH, &prev);
          pd->ip_hdr_size = sizeof (ip6_header_t);
          /* AH is expected to directly follow the fixed ip6 header */
          ASSERT ((u8 *) ah0 - (u8 *) ih6 == pd->ip_hdr_size);
        }
      else
        {
          /* fragments cannot be authenticated; drop them */
          if (ip4_is_fragment (ih4))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_DROP_FRAGMENTS];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto next;
            }
          pd->ip_hdr_size = ip4_header_bytes (ih4);
          ah0 = (ah_header_t *) ((u8 *) ih4 + pd->ip_hdr_size);
        }

      /* wire order to host order (the swap is symmetric) */
      pd->seq = clib_host_to_net_u32 (ah0->seq_no);

      /* anti-replay check; also yields the candidate ESN hi-seq */
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
                                               &pd->seq_hi))
        {
          b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
          next[0] = AH_DECRYPT_NEXT_DROP;
          goto next;
        }

      current_sa_bytes += b[0]->current_length;
      current_sa_pkts += 1;

      pd->icv_size = sa0->integ_icv_size;
      pd->nexthdr_cached = ah0->nexthdr;
      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
        {
          /* for ESN the hi sequence number is appended after the payload
           * for the ICV computation; make sure it fits in the buffer */
          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
                             pd->current_data + b[0]->current_length
                             + sizeof (u32) > buffer_data_size))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_NO_TAIL_SPACE];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto next;
            }

          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->integ_op_id);

          op->src = (u8 *) ih4;
          op->len = b[0]->current_length;
          /* scratch area in front of the packet holds the received ICV */
          op->digest = (u8 *) ih4 - pd->icv_size;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->digest_len = pd->icv_size;
          op->key_index = sa0->integ_key_index;
          /* buffer index within this frame, used by ah_process_ops */
          op->user_data = b - bufs;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);

              op->len += sizeof (seq_hi);
              clib_memcpy (op->src + b[0]->current_length, &seq_hi,
                           sizeof (seq_hi));
            }
          /* save the received ICV, then zero it in the packet: the ICV
           * field must be zero while the digest is computed */
          clib_memcpy (op->digest, ah0->auth_data, pd->icv_size);
          clib_memset (ah0->auth_data, 0, pd->icv_size);

          /* zero the mutable IP header fields for the ICV computation,
           * saving the originals for restoration in pass 2 */
          if (is_ip6)
            {
              pd->ip_version_traffic_class_and_flow_label =
                ih6->ip_version_traffic_class_and_flow_label;
              pd->hop_limit = ih6->hop_limit;
              ih6->ip_version_traffic_class_and_flow_label = 0x60;
              ih6->hop_limit = 0;
              pd->nexthdr = ah0->nexthdr;
              pd->icv_padding_len =
                ah_calc_icv_padding_len (pd->icv_size, 1 /* is_ipv6 */ );
            }
          else
            {
              pd->tos = ih4->tos;
              pd->ttl = ih4->ttl;
              ih4->tos = 0;
              ih4->ttl = 0;
              ih4->checksum = 0;
              pd->icv_padding_len =
                ah_calc_icv_padding_len (pd->icv_size, 0 /* is_ipv6 */ );
            }
        }

    next:
      n_left -= 1;
      pd += 1;
      next += 1;
      b += 1;
    }

  /* rewind the cursors for pass 2 */
  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  vlib_node_increment_counter (vm, node->node_index, AH_DECRYPT_ERROR_RX_PKTS,
                               n_left);
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);

  /* verify all queued ICVs in one batch; failures become drops in nexts[] */
  ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

  /* pass 2: finalize packets whose integrity check passed */
  while (n_left > 0)
    {
      ip4_header_t *oh4;
      ip6_header_t *oh6;

      /* already resolved in pass 1 (drop/handoff) or failed the ICV check */
      if (next[0] < AH_DECRYPT_N_NEXT)
        goto trace;

      sa0 = ipsec_sa_get (pd->sa_index);

      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
        {
          /* redo the anti-replay check. see esp_decrypt for details */
          if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi,
                                                   true, NULL))
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto trace;
            }
          ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
        }

      /* strip the outer IP header plus AH header, ICV and its padding */
      u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
        + pd->icv_padding_len;
      vlib_buffer_advance (b[0], pd->ip_hdr_size + ah_hdr_len);
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {                       /* tunnel mode */
          if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
            next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
          else if (pd->nexthdr_cached == IP_PROTOCOL_IPV6)
            next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
          else
            {
              b[0]->error = node->errors[AH_DECRYPT_ERROR_DECRYPTION_FAILED];
              next[0] = AH_DECRYPT_NEXT_DROP;
              goto trace;
            }
        }
      else
        {                       /* transport mode */
          /* rebuild the IP header in front of the payload, restoring the
           * mutable fields saved in pass 1 */
          if (is_ip6)
            {
              vlib_buffer_advance (b[0], -sizeof (ip6_header_t));
              oh6 = vlib_buffer_get_current (b[0]);
              /* the regions overlap when the stripped AH header is
               * shorter than the IP header; use memmove in that case */
              if (ah_hdr_len >= sizeof (ip6_header_t))
                clib_memcpy (oh6, b[0]->data + pd->current_data,
                             sizeof (ip6_header_t));
              else
                memmove (oh6, b[0]->data + pd->current_data,
                         sizeof (ip6_header_t));

              next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
              oh6->protocol = pd->nexthdr;
              oh6->hop_limit = pd->hop_limit;
              oh6->ip_version_traffic_class_and_flow_label =
                pd->ip_version_traffic_class_and_flow_label;
              oh6->payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain
                                      (vm, b[0]) - sizeof (ip6_header_t));
            }
          else
            {
              vlib_buffer_advance (b[0], -sizeof (ip4_header_t));
              oh4 = vlib_buffer_get_current (b[0]);
              if (ah_hdr_len >= sizeof (ip4_header_t))
                clib_memcpy (oh4, b[0]->data + pd->current_data,
                             sizeof (ip4_header_t));
              else
                memmove (oh4, b[0]->data + pd->current_data,
                         sizeof (ip4_header_t));

              next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
              oh4->ip_version_and_header_length = 0x45;
              oh4->fragment_id = 0;
              oh4->flags_and_fragment_offset = 0;
              oh4->protocol = pd->nexthdr_cached;
              oh4->length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              oh4->ttl = pd->ttl;
              oh4->tos = pd->tos;
              oh4->checksum = ip4_header_checksum (oh4);
            }
        }

      vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
          ah_decrypt_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->integ_alg = sa0->integ_alg;
          tr->seq_num = pd->seq;
        }

      n_left -= 1;
      pd += 1;
      next += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
423
/* Node function for ah4-decrypt: IPv4 variant of ah_decrypt_inline. */
VLIB_NODE_FN (ah4_decrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}
430
/* *INDENT-OFF* */
/* Graph node registration for AH decryption of IPv4 packets. */
VLIB_REGISTER_NODE (ah4_decrypt_node) = {
  .name = "ah4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
  .next_nodes = {
    [AH_DECRYPT_NEXT_DROP] = "ip4-drop",
    [AH_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [AH_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [AH_DECRYPT_NEXT_HANDOFF] = "ah4-decrypt-handoff",
  },
};
/* *INDENT-ON* */
450
/* Node function for ah6-decrypt: IPv6 variant of ah_decrypt_inline. */
VLIB_NODE_FN (ah6_decrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}
457
/* *INDENT-OFF* */
/* Graph node registration for AH decryption of IPv6 packets. */
VLIB_REGISTER_NODE (ah6_decrypt_node) = {
  .name = "ah6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
  .next_nodes = {
    [AH_DECRYPT_NEXT_DROP] = "ip6-drop",
    [AH_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [AH_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [AH_DECRYPT_NEXT_HANDOFF] = "ah6-decrypt-handoff",
  },
};
/* *INDENT-ON* */
477
478 #ifndef CLIB_MARCH_VARIANT
479
480 static clib_error_t *
481 ah_decrypt_init (vlib_main_t *vm)
482 {
483   ipsec_main_t *im = &ipsec_main;
484
485   im->ah4_dec_fq_index =
486     vlib_frame_queue_main_init (ah4_decrypt_node.index, 0);
487   im->ah6_dec_fq_index =
488     vlib_frame_queue_main_init (ah6_decrypt_node.index, 0);
489
490   return 0;
491 }
492
493 VLIB_INIT_FUNCTION (ah_decrypt_init);
494
495 #endif
496
497 /*
498  * fd.io coding-style-patch-verification: ON
499  *
500  * Local Variables:
501  * eval: (c-set-style "gnu")
502  * End:
503  */