src/vnet/ipsec/ipsec_output.c
/*
 * ipsec_output.c : IPSec output node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_output.h>

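/*
 * Per-node error counters. The macro below is expanded twice: once into
 * the ipsec_output_error_t enum and once into the matching string table,
 * so the symbols and their display strings cannot drift apart.
 */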
#define foreach_ipsec_output_error                   \
 _(RX_PKTS, "IPSec pkts received")                   \
 _(POLICY_DISCARD, "IPSec policy discard")           \
 _(POLICY_NO_MATCH, "IPSec policy (no match)")       \
 _(POLICY_PROTECT, "IPSec policy protect")           \
 _(POLICY_BYPASS, "IPSec policy bypass")             \
 _(ENCAPS_FAILED, "IPSec encapsulation failed")

typedef enum
{
#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
  foreach_ipsec_output_error
#undef _
    IPSEC_OUTPUT_N_ERROR,
} ipsec_output_error_t;

static char *ipsec_output_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_output_error
#undef _
};

typedef struct
{
  u32 spd_id;
  u32 policy_id;
} ipsec_output_trace_t;

/* packet trace format function */
static u8 *
format_ipsec_output_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);

  s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);

  return s;
}

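/*
 * Shared worker for the v4/v6 output feature nodes: walk the frame,
 * match each packet against the egress SPD of its TX interface, then
 * send it to the ESP/AH encrypt node (PROTECT), the next output
 * feature (BYPASS), or error-drop (DISCARD and no match). Buffers are
 * enqueued into a frame that is flushed whenever the next node changes.
 */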
static inline uword
ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ipv6)
{
  ipsec_main_t *im = &ipsec_main;

  u32 *from, *to_next = 0, thread_index;
  u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
  u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
  vlib_frame_t *f = 0;
  u32 spd_index0 = ~0;
  ipsec_spd_t *spd0 = 0;
  int bogus;
  u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
  u8 flow_cache_enabled = im->output_flow_cache_flag;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      u32 bi0, pi0, bi1;
      vlib_buffer_t *b0, *b1;
      ipsec_policy_t *p0 = NULL;
      ip4_header_t *ip0;
      ip6_header_t *ip6_0 = 0;
      udp_header_t *udp0;
      u32 iph_offset = 0;
      tcp_header_t *tcp0;
      u64 bytes0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
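      /* Prefetch the next buffer's metadata and data while b0 is being
       * processed, to hide the memory latency of the lookups below. */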
      if (n_left_from > 1)
        {
          bi1 = from[1];
          b1 = vlib_get_buffer (vm, bi1);
          CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
          vlib_prefetch_buffer_data (b1, LOAD);
        }
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
      iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                              + iph_offset);

      /* Look up the SPD only when sw_if_index changes */
      if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
        {
          uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
          ALWAYS_ASSERT (p);
          spd_index0 = p[0];
          spd0 = pool_elt_at_index (im->spds, spd_index0);
          last_sw_if_index = sw_if_index0;
        }

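      /* Match the packet against the egress SPD. The IPv6 path reads
       * the 5-tuple straight from the header; the IPv4 path may hit
       * the flow cache first and fall back to the linear policy walk. */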
      if (is_ipv6)
        {
          ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                                    + iph_offset);

          udp0 = ip6_next_header (ip6_0);
#if 0
          clib_warning
            ("packet received from %U port %u to %U port %u spd_id %u",
             format_ip6_address, &ip6_0->src_address,
             clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
             &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
             spd0->id);
#endif

          p0 = ipsec6_output_policy_match (spd0,
                                           &ip6_0->src_address,
                                           &ip6_0->dst_address,
                                           clib_net_to_host_u16
                                           (udp0->src_port),
                                           clib_net_to_host_u16
                                           (udp0->dst_port), ip6_0->protocol);
        }
      else
        {
          udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

#if 0
          clib_warning ("packet received from %U to %U port %u",
                        format_ip4_address, ip0->src_address.as_u8,
                        format_ip4_address, ip0->dst_address.as_u8,
                        clib_net_to_host_u16 (udp0->dst_port));
          clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
                        sw_if_index0, spd_index0, spd0->id);
#endif

          /* Try the flow cache first if it is enabled */
          if (flow_cache_enabled)
            {
              p0 = ipsec4_out_spd_find_flow_cache_entry (
                im, ip0->protocol, ip0->src_address.as_u32,
                ip0->dst_address.as_u32, udp0->src_port, udp0->dst_port);
            }

          /* Fall back to linear search if flow cache lookup fails */
          if (p0 == NULL)
            {
              p0 = ipsec_output_policy_match (
                spd0, ip0->protocol,
                clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                clib_net_to_host_u16 (udp0->src_port),
                clib_net_to_host_u16 (udp0->dst_port), flow_cache_enabled);
            }
        }
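      /* udp0 already points at the L4 header; alias it as TCP so the
       * checksum fixup below can write whichever field applies. */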
      tcp0 = (void *) udp0;

      if (PREDICT_TRUE (p0 != NULL))
        {
          pi0 = p0 - im->policies;

          vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
                                          thread_index, pi0);

          if (is_ipv6)
            {
              bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
              bytes0 += sizeof (ip6_header_t);
            }
          else
            {
              bytes0 = clib_net_to_host_u16 (ip0->length);
            }

          if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
            {
              ipsec_sa_t *sa = 0;
              nc_protect++;
              sa = ipsec_sa_get (p0->sa_index);
              if (sa->protocol == IPSEC_PROTOCOL_ESP)
                {
                  if (is_ipv6)
                    next_node_index = im->esp6_encrypt_node_index;
                  else
                    next_node_index = im->esp4_encrypt_node_index;
                }
              else if (is_ipv6)
                next_node_index = im->ah6_encrypt_node_index;
              else
                next_node_index = im->ah4_encrypt_node_index;
              vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
                {
                  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;

                  /*
                   * Clear the offload flags now, while the buffer
                   * metadata cache line is still hot, then compute the
                   * checksums in software before encryption.
                   */
                  vnet_buffer_offload_flags_clear (b0, oflags);

                  if (is_ipv6)
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                    }
                  else
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
                        {
                          ip0->checksum = ip4_header_checksum (ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                    }
                }
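              /* Step past the L2 rewrite so the encrypt node starts
               * at the IP header. */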
              vlib_buffer_advance (b0, iph_offset);
            }
          else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
            {
              nc_bypass++;
              next_node_index = get_next_output_feature_node_index (b0, node);
            }
          else
            {
              nc_discard++;
              next_node_index = im->error_drop_node_index;
            }
          vlib_increment_combined_counter
            (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
        }
      else
        {
          pi0 = ~0;
          nc_nomatch++;
          next_node_index = im->error_drop_node_index;
        }

      from += 1;
      n_left_from -= 1;

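      /* Buffers are enqueued into one frame at a time; whenever the
       * next node changes (or on the first packet), hand the current
       * frame over and open a fresh one on the new node. */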
      if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
        {
          /* if this is not the first frame, hand the full one over */
          if (f)
            vlib_put_frame_to_node (vm, last_next_node_index, f);

          last_next_node_index = next_node_index;

          f = vlib_get_frame_to_node (vm, next_node_index);

          /* Propagate the trace flag from the node runtime to the frame */
          f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;

          to_next = vlib_frame_vector_args (f);
        }

      to_next[0] = bi0;
      to_next += 1;
      f->n_vectors++;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
          PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_output_trace_t *tr =
            vlib_add_trace (vm, node, b0, sizeof (*tr));
          if (spd0)
            tr->spd_id = spd0->id;
          tr->policy_id = pi0;
        }
    }

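  /* Flush the last open frame, then publish the per-action counters. */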
  vlib_put_frame_to_node (vm, next_node_index, f);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
                               nc_nomatch);
  return from_frame->n_vectors;
}

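/*
 * Thin wrappers: instantiate the shared inline once per address family
 * and register the corresponding output feature nodes.
 */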
VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 0);
}

VLIB_REGISTER_NODE (ipsec4_output_node) = {
  .name = "ipsec4-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};

VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 1);
}

VLIB_REGISTER_NODE (ipsec6_output_node) = {
  .name = "ipsec6-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};