ipsec: make match function inline
[vpp.git] / src / vnet / ipsec / ipsec_output.c
1 /*
2  * ipsec_output.c : IPSec output node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/ipsec_io.h>
24 #include <vnet/ipsec/ipsec_output.h>
25
/* Per-node error/event counters. Each _(SYM, string) pair expands to an
 * IPSEC_OUTPUT_ERROR_<SYM> enum value here and its description string in
 * ipsec_output_error_strings below — the two must stay index-aligned. */
#define foreach_ipsec_output_error                   \
 _(RX_PKTS, "IPSec pkts received")                   \
 _(POLICY_DISCARD, "IPSec policy discard")           \
 _(POLICY_NO_MATCH, "IPSec policy (no match)")       \
 _(POLICY_PROTECT, "IPSec policy protect")           \
 _(POLICY_BYPASS, "IPSec policy bypass")             \
 _(ENCAPS_FAILED, "IPSec encapsulation failed")

typedef enum
{
#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
  foreach_ipsec_output_error
#undef _
    /* NOTE(review): the count member is named IPSEC_DECAP_N_ERROR — looks
     * like a copy/paste from the decap node; renaming could break references
     * outside this file, so it is left as-is. */
    IPSEC_DECAP_N_ERROR,
} ipsec_output_error_t;
41
/* Human-readable descriptions, index-aligned with ipsec_output_error_t
 * (same foreach macro expanded for the string of each pair). */
static char *ipsec_output_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_output_error
#undef _
};
47
/* Per-packet trace record: the id of the SPD consulted and the matched
 * policy's pool index (set to ~0 when no policy matched). */
typedef struct
{
  u32 spd_id;
  u32 policy_id;
} ipsec_output_trace_t;
53
54 /* packet trace format function */
55 static u8 *
56 format_ipsec_output_trace (u8 * s, va_list * args)
57 {
58   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60   ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);
61
62   s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);
63
64   return s;
65 }
66
67 always_inline uword
68 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
69                       ip6_address_t * ua)
70 {
71   if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
72       (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
73     return 1;
74   return 0;
75 }
76
77 always_inline ipsec_policy_t *
78 ipsec6_output_policy_match (ipsec_spd_t * spd,
79                             ip6_address_t * la,
80                             ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
81 {
82   ipsec_main_t *im = &ipsec_main;
83   ipsec_policy_t *p;
84
85   u32 *i;
86
87   if (!spd)
88     return 0;
89
90   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
91   {
92     p = pool_elt_at_index (im->policies, *i);
93     if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
94                        (p->protocol != pr)))
95       continue;
96
97     if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
98       continue;
99
100     if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
101       continue;
102
103     if (PREDICT_FALSE
104         ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
105          && (pr != IP_PROTOCOL_SCTP)))
106       return p;
107
108     if (lp < p->lport.start)
109       continue;
110
111     if (lp > p->lport.stop)
112       continue;
113
114     if (rp < p->rport.start)
115       continue;
116
117     if (rp > p->rport.stop)
118       continue;
119
120     return p;
121   }
122
123   return 0;
124 }
125
/*
 * Shared worker for the ipsec4-/ipsec6-output-feature nodes
 * (is_ipv6 selects the header parsing and policy-match path).
 *
 * For each packet: find the SPD bound to the TX sw_if_index (cached
 * across consecutive packets on the same interface), match the packet
 * against the outbound policies, then dispatch by policy action:
 *   PROTECT  -> esp/ah encrypt node (after resolving checksum offloads)
 *   BYPASS   -> next output feature node
 *   DISCARD / no match -> error-drop
 * Packets are batched into frames; a new frame is allocated only when
 * the destination node changes from one packet to the next.
 *
 * Returns the number of packets processed (from_frame->n_vectors).
 */
static inline uword
ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ipv6)
{
  ipsec_main_t *im = &ipsec_main;

  u32 *from, *to_next = 0, thread_index;
  u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
  u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
  vlib_frame_t *f = 0;
  u32 spd_index0 = ~0;
  ipsec_spd_t *spd0 = 0;
  int bogus;
  /* per-action packet tallies, flushed to the node counters at the end */
  u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
  u8 flow_cache_enabled = im->output_flow_cache_flag;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      u32 bi0, pi0, bi1;
      vlib_buffer_t *b0, *b1;
      ipsec_policy_t *p0 = NULL;
      ip4_header_t *ip0;
      ip6_header_t *ip6_0 = 0;
      udp_header_t *udp0;
      u32 iph_offset = 0;
      tcp_header_t *tcp0;
      u64 bytes0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      /* prefetch the next buffer while this one is processed */
      if (n_left_from > 1)
        {
          bi1 = from[1];
          b1 = vlib_get_buffer (vm, bi1);
          CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
          vlib_prefetch_buffer_data (b1, LOAD);
        }
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
      /* the L2 rewrite is already prepended; skip it to reach the IP header */
      iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                              + iph_offset);

      /* lookup for SPD only if sw_if_index is changed */
      if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
        {
          uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
          /* feature is only meant to run on interfaces with an SPD bound */
          ALWAYS_ASSERT (p);
          spd_index0 = p[0];
          spd0 = pool_elt_at_index (im->spds, spd_index0);
          last_sw_if_index = sw_if_index0;
        }

      if (is_ipv6)
        {
          ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                                    + iph_offset);

          /* NOTE(review): assumes the L4 header immediately follows the
           * fixed IPv6 header — extension headers are not walked here */
          udp0 = ip6_next_header (ip6_0);
#if 0
          clib_warning
            ("packet received from %U port %u to %U port %u spd_id %u",
             format_ip6_address, &ip6_0->src_address,
             clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
             &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
             spd0->id);
#endif

          p0 = ipsec6_output_policy_match (spd0,
                                           &ip6_0->src_address,
                                           &ip6_0->dst_address,
                                           clib_net_to_host_u16
                                           (udp0->src_port),
                                           clib_net_to_host_u16
                                           (udp0->dst_port), ip6_0->protocol);
        }
      else
        {
          udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

#if 0
          clib_warning ("packet received from %U to %U port %u",
                        format_ip4_address, ip0->src_address.as_u8,
                        format_ip4_address, ip0->dst_address.as_u8,
                        clib_net_to_host_u16 (udp0->dst_port));
          clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
                        sw_if_index0, spd_index0, spd0->id);
#endif

          /*
           * Check whether flow cache is enabled.
           */
          if (flow_cache_enabled)
            {
              /* flow cache takes addresses/ports in network byte order */
              p0 = ipsec4_out_spd_find_flow_cache_entry (
                im, ip0->protocol, ip0->src_address.as_u32,
                ip0->dst_address.as_u32, udp0->src_port, udp0->dst_port);
            }

          /* Fall back to linear search if flow cache lookup fails */
          if (p0 == NULL)
            {
              p0 = ipsec_output_policy_match (
                spd0, ip0->protocol,
                clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                clib_net_to_host_u16 (udp0->src_port),
                clib_net_to_host_u16 (udp0->dst_port), flow_cache_enabled);
            }
        }
      /* tcp and udp headers share the leading port layout; alias the
       * pointer so the checksum fixups below can address either */
      tcp0 = (void *) udp0;

      if (PREDICT_TRUE (p0 != NULL))
        {
          pi0 = p0 - im->policies;

          vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
                                          thread_index, pi0);

          /* bytes accounted to the policy counter = total IP packet length */
          if (is_ipv6)
            {
              bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
              bytes0 += sizeof (ip6_header_t);
            }
          else
            {
              bytes0 = clib_net_to_host_u16 (ip0->length);
            }

          if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
            {
              ipsec_sa_t *sa = 0;
              nc_protect++;
              sa = ipsec_sa_get (p0->sa_index);
              /* pick the encrypt node for the SA protocol and IP version */
              if (sa->protocol == IPSEC_PROTOCOL_ESP)
                if (is_ipv6)
                  next_node_index = im->esp6_encrypt_node_index;
                else
                  next_node_index = im->esp4_encrypt_node_index;
              else if (is_ipv6)
                next_node_index = im->ah6_encrypt_node_index;
              else
                next_node_index = im->ah4_encrypt_node_index;
              vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;

              /* pending checksum offloads must be resolved in software
               * here: the checksums are covered by the encryption */
              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
                {
                  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;

                  /*
                   * Clearing offload flags before checksum is computed
                   * It guarantees the cache hit!
                   */
                  vnet_buffer_offload_flags_clear (b0, oflags);

                  if (is_ipv6)
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                    }
                  else
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
                        {
                          ip0->checksum = ip4_header_checksum (ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                    }
                }
              /* hand the encrypt node the bare IP packet: strip the L2
               * rewrite that was skipped over above */
              vlib_buffer_advance (b0, iph_offset);
            }
          else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
            {
              nc_bypass++;
              next_node_index = get_next_output_feature_node_index (b0, node);
            }
          else
            {
              /* DISCARD (or any other action): drop the packet */
              nc_discard++;
              next_node_index = im->error_drop_node_index;
            }
          vlib_increment_combined_counter
            (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
        }
      else
        {
          /* no policy matched: drop; ~0 marks "no policy" in the trace */
          pi0 = ~0;
          nc_nomatch++;
          next_node_index = im->error_drop_node_index;
        }

      from += 1;
      n_left_from -= 1;

      /* open a new outgoing frame on the first packet or whenever the
       * destination node changes */
      if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
        {
          /* if this is not 1st frame */
          if (f)
            vlib_put_frame_to_node (vm, last_next_node_index, f);

          last_next_node_index = next_node_index;

          f = vlib_get_frame_to_node (vm, next_node_index);

          /* frame->frame_flags, copy it from node */
          /* Copy trace flag from next_frame and from runtime. */
          f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;

          to_next = vlib_frame_vector_args (f);
        }

      to_next[0] = bi0;
      to_next += 1;
      f->n_vectors++;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
          PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_output_trace_t *tr =
            vlib_add_trace (vm, node, b0, sizeof (*tr));
          if (spd0)
            tr->spd_id = spd0->id;
          tr->policy_id = pi0;
        }
    }

  /* flush the final (possibly partial) frame.
   * NOTE(review): if this node were ever dispatched with an empty frame,
   * f would still be NULL here — presumably that cannot happen; confirm. */
  vlib_put_frame_to_node (vm, next_node_index, f);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
                               nc_nomatch);
  return from_frame->n_vectors;
}
389
/* IPv4 node entry point: thin wrapper dispatching to the shared worker
 * with is_ipv6 = 0. */
VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 0);
}
396
/* *INDENT-OFF* */
/* Graph-node registration for the IPv4 output feature. */
VLIB_REGISTER_NODE (ipsec4_output_node) = {
  .name = "ipsec4-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};
/* *INDENT-ON* */
415
/* IPv6 node entry point: thin wrapper dispatching to the shared worker
 * with is_ipv6 = 1. */
VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 1);
}
422
/* Graph-node registration for the IPv6 output feature. */
VLIB_REGISTER_NODE (ipsec6_output_node) = {
  .name = "ipsec6-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};
439