2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * This file implements caching of ioam header and reattaching
17 * it in response message by performing request-response matching.
18 * Works for TCP SYN/SYN-ACK.
19 * This feature is used for anycast server selection.
20 * ioam data thus cached is used to measure and get complete round trip
21 * network path to help in server selection.
22 * There are 2 graph nodes defined to:
23 * 1. process packets that contain iOAM header and cache it
24 * 2. process TCP SYN-ACKs and reattach ioam header from the
25 * cache corresponding to TCP-SYN
26 * These graph nodes are attached to the vnet graph based on
27 * ioam cache and classifier configs.
29 * If db06::06 is the anycast service IP6 address:
33 * Apply this classifier on interface where requests for anycast service are received:
34 * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3 ip6 dst db06::06
35 * ioam-decap anycast <<< ioam-decap is hooked to cache when set ioam ip6 cache is enabled
37 * Apply this classifier on interface where responses from anycast service are received:
38 * classify session acl-hit-next ip6-node ip6-add-from-cache-hop-by-hop table-index 0 match l3
39 * ip6 src db06::06 ioam-encap anycast-response
42 #include <vlib/vlib.h>
43 #include <vnet/vnet.h>
44 #include <vnet/pg/pg.h>
45 #include <vppinfra/error.h>
46 #include <vnet/ip/ip.h>
47 #include <ioam/ip6/ioam_cache.h>
48 #include <vnet/ip/ip6_hop_by_hop.h>
49 #include <vnet/ip/ip6_hop_by_hop_packet.h>
57 /* packet trace format function */
/* Render a cache_trace_t entry for "show trace": prints the IPv6 flow
 * label that was cached and the next-node index chosen for the packet.
 * NOTE(review): the "static u8" return-type line, braces and the final
 * "return s;" are outside this view — confirm against the full file. */
59 format_cache_trace (u8 * s, va_list * args)
61 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63 cache_trace_t *t = va_arg (*args, cache_trace_t *);
65 s = format (s, "CACHE: flow_label %d, next index %d",
66 t->flow_label, t->next_index);
/* Forward declaration so the node function can bump this node's counters. */
70 vlib_node_registration_t ioam_cache_node;
/* X-macro list of per-node error counters; expanded below into the
 * CACHE_ERROR_* enum and the matching error-string table. */
72 #define foreach_cache_error \
73 _(RECORDED, "ip6 iOAM headers cached")
77 #define _(sym,str) CACHE_ERROR_##sym,
83 static char *cache_error_strings[] = {
84 #define _(sym,string) string,
/* Next-node disposition: hand the packet to hop-by-hop header removal. */
91 IOAM_CACHE_NEXT_POP_HBYH,
/* Graph-node function: for each IPv6 packet carrying a TCP SYN (and not
 * an ACK), cache the iOAM hop-by-hop header keyed so the matching
 * SYN-ACK can later re-attach it; all packets then go to
 * ip6-pop-hop-by-hop.  Returns the number of vectors processed.
 * NOTE(review): several interior lines (buffer dequeue, some locals,
 * closing braces) are elided in this view. */
96 ip6_ioam_cache_node_fn (vlib_main_t * vm,
97 vlib_node_runtime_t * node, vlib_frame_t * frame)
99 u32 n_left_from, *from, *to_next;
100 cache_next_t next_index;
103 from = vlib_frame_vector_args (frame);
104 n_left_from = frame->n_vectors;
105 next_index = node->cached_next_index;
107 while (n_left_from > 0)
111 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
113 while (n_left_from > 0 && n_left_to_next > 0)
/* Single-packet loop: default disposition is hop-by-hop pop. */
117 u32 next0 = IOAM_CACHE_NEXT_POP_HBYH;
119 ip6_hop_by_hop_header_t *hbh0;
123 /* speculatively enqueue p0 to the current next frame */
131 p0 = vlib_get_buffer (vm, bi0);
132 ip0 = vlib_buffer_get_current (p0);
/* Walk the ip6 extension-header chain looking for TCP. */
133 if (IP_PROTOCOL_TCP ==
134 ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
136 tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
/* Only pure SYNs are cached (SYN set, ACK clear) — the response
 * SYN-ACK is handled by the add-from-cache node below. */
137 if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
138 (tcp0->flags & TCP_FLAG_ACK) == 0)
140 /* Cache the ioam hbh header */
/* hbh header sits immediately after the fixed ip6 header;
 * keyed (in part) by dst port and the expected ack number
 * (seq + 1) so the SYN-ACK lookup can match it. */
141 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
142 if (0 == ioam_cache_add (p0,
147 (tcp0->dst_port), hbh0,
149 (tcp0->seq_number) + 1))
155 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
157 if (p0->flags & VLIB_BUFFER_IS_TRACED)
160 vlib_add_trace (vm, node, p0, sizeof (*t));
163 (ip0->ip_version_traffic_class_and_flow_label);
164 t->next_index = next0;
167 /* verify speculative enqueue, maybe switch current next frame */
168 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
169 to_next, n_left_to_next,
173 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Account for the headers cached in this frame. */
176 vlib_node_increment_counter (vm, ioam_cache_node.index,
177 CACHE_ERROR_RECORDED, recorded);
178 return frame->n_vectors;
182 * Node for IP6 iOAM header cache
/* Registration: internal node, one next ("ip6-pop-hop-by-hop"), reached
 * via the classifier hook described in the file header. */
185 VLIB_REGISTER_NODE (ioam_cache_node) =
187 .function = ip6_ioam_cache_node_fn,
188 .name = "ip6-ioam-cache",
189 .vector_size = sizeof (u32),
190 .format_trace = format_cache_trace,
191 .type = VLIB_NODE_TYPE_INTERNAL,
192 .n_errors = ARRAY_LEN (cache_error_strings),
193 .error_strings = cache_error_strings,
194 .n_next_nodes = IOAM_CACHE_N_NEXT,
195 /* edit / add dispositions here */
198 [IOAM_CACHE_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop"
206 } ip6_add_from_cache_hbh_trace_t;
208 /* packet trace format function */
/* Render an ip6_add_from_cache_hbh_trace_t for "show trace": prints the
 * next-node index selected for the rewritten packet.
 * NOTE(review): return-type line and closing brace are outside this view. */
210 format_ip6_add_from_cache_hbh_trace (u8 * s, va_list * args)
212 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
213 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
214 ip6_add_from_cache_hbh_trace_t *t = va_arg (*args,
215 ip6_add_from_cache_hbh_trace_t
218 s = format (s, "IP6_ADD_FROM_CACHE_HBH: next index %d", t->next_index);
/* Forward declaration for counter updates inside the node function. */
222 vlib_node_registration_t ip6_add_from_cache_hbh_node;
/* X-macro error list for the add-from-cache node; expanded into the
 * IP6_ADD_FROM_CACHE_HBH_ERROR_* enum and its string table. */
224 #define foreach_ip6_add_from_cache_hbh_error \
225 _(PROCESSED, "Pkts w/ added ip6 hop-by-hop options")
229 #define _(sym,str) IP6_ADD_FROM_CACHE_HBH_ERROR_##sym,
230 foreach_ip6_add_from_cache_hbh_error
232 IP6_ADD_FROM_CACHE_HBH_N_ERROR,
233 } ip6_add_from_cache_hbh_error_t;
235 static char *ip6_add_from_cache_hbh_error_strings[] = {
236 #define _(sym,string) string,
237 foreach_ip6_add_from_cache_hbh_error
/* Next-node dispositions: normal forwarding, or drop on cache miss. */
241 #define foreach_ip6_ioam_cache_input_next \
242 _(IP6_LOOKUP, "ip6-lookup") \
243 _(DROP, "error-drop")
247 #define _(s,n) IP6_IOAM_CACHE_INPUT_NEXT_##s,
248 foreach_ip6_ioam_cache_input_next
250 IP6_IOAM_CACHE_INPUT_N_NEXT,
251 } ip6_ioam_cache_input_next_t;
/* Graph-node function: for TCP SYN-ACK (or RST) responses, look up the
 * iOAM hop-by-hop header cached from the originating SYN, re-insert it
 * (plus an SR header from cm->sr_rewrite_template) in front of the
 * payload, and send the packet to ip6-lookup; cache misses are dropped.
 * Returns the number of vectors processed.
 * NOTE(review): several interior lines (buffer dequeue, some locals,
 * closing braces) are elided in this view. */
255 ip6_add_from_cache_hbh_node_fn (vlib_main_t * vm,
256 vlib_node_runtime_t * node,
257 vlib_frame_t * frame)
259 ioam_cache_main_t *cm = &ioam_cache_main;
260 u32 n_left_from, *from, *to_next;
261 ip_lookup_next_t next_index;
/* SR header bytes appended after the hbh rewrite — length is fixed
 * for the whole frame, so hoisted out of the loop. */
265 u32 sr_rewrite_len = vec_len (cm->sr_rewrite_template);
267 from = vlib_frame_vector_args (frame);
268 n_left_from = frame->n_vectors;
269 next_index = node->cached_next_index;
271 while (n_left_from > 0)
275 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
277 while (n_left_from > 0 && n_left_to_next > 0)
283 ip6_hop_by_hop_header_t *hbh0;
284 ip6_sr_header_t *srh0 = 0;
285 u64 *copy_src0, *copy_dst0;
289 ioam_cache_entry_t *entry = 0;
/* Default: forward normally after the rewrite. */
291 next0 = IP6_IOAM_CACHE_INPUT_NEXT_IP6_LOOKUP;
292 /* speculatively enqueue b0 to the current next frame */
300 b0 = vlib_get_buffer (vm, bi0);
302 ip0 = vlib_buffer_get_current (b0);
/* Non-TCP packets skip the rewrite path entirely. */
303 if (IP_PROTOCOL_TCP !=
304 ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
308 tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
/* Match responses only: SYN-ACK, or RST (connection refused). */
309 if (((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
310 (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK) ||
311 (tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
/* The SYN path cached under (seq + 1); the response's ack
 * number is the matching key. */
313 if (0 != (entry = ioam_cache_lookup (ip0,
319 (tcp0->ack_number))))
321 rewrite = entry->ioam_rewrite_string;
322 rewrite_len = vec_len (rewrite);
/* No cached SYN for this response: drop it. */
326 next0 = IP6_IOAM_CACHE_INPUT_NEXT_DROP;
334 /* Copy the ip header left by the required amount */
/* 5 x u64 = 40 bytes = sizeof(ip6_header_t); moves the fixed
 * header to open a gap for hbh + SR headers. */
335 copy_dst0 = (u64 *) (((u8 *) ip0) - (rewrite_len + sr_rewrite_len));
336 copy_src0 = (u64 *) ip0;
338 copy_dst0[0] = copy_src0[0];
339 copy_dst0[1] = copy_src0[1];
340 copy_dst0[2] = copy_src0[2];
341 copy_dst0[3] = copy_src0[3];
342 copy_dst0[4] = copy_src0[4];
343 vlib_buffer_advance (b0, -(word) (rewrite_len + sr_rewrite_len));
344 ip0 = vlib_buffer_get_current (b0);
346 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
347 srh0 = (ip6_sr_header_t *) ((u8 *) hbh0 + rewrite_len);
348 /* $$$ tune, rewrite_len is a multiple of 8 */
349 clib_memcpy (hbh0, rewrite, rewrite_len);
350 clib_memcpy (srh0, cm->sr_rewrite_template, sr_rewrite_len);
351 /* Copy dst address into the DA slot in the segment list */
352 clib_memcpy (srh0->segments, ip0->dst_address.as_u64,
353 sizeof (ip6_address_t));
354 /* Rewrite the ip6 dst address with the first hop */
355 clib_memcpy (ip0->dst_address.as_u64, entry->next_hop.as_u64,
356 sizeof (ip6_address_t));
/* Second segment comes from the address recorded inside the
 * cached iOAM data at my_address_offset. */
357 clib_memcpy (&srh0->segments[1],
358 (u8 *) hbh0 + entry->my_address_offset,
359 sizeof (ip6_address_t));
360 srh0->segments_left--;
/* Cache entry is one-shot: free it after reattaching. */
361 ioam_cache_entry_free (entry);
363 /* Patch the protocol chain, insert the h-b-h (type 0) header */
364 srh0->protocol = ip0->protocol;
365 hbh0->protocol = IPPROTO_IPV6_ROUTE;
/* Payload grows by the two inserted headers. */
368 clib_net_to_host_u16 (ip0->payload_length) + rewrite_len +
370 ip0->payload_length = clib_host_to_net_u16 (new_l0);
373 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
374 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
376 ip6_add_from_cache_hbh_trace_t *t =
377 vlib_add_trace (vm, node, b0, sizeof (*t));
378 t->next_index = next0;
381 /* verify speculative enqueue, maybe switch current next frame */
382 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
383 to_next, n_left_to_next,
387 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
390 vlib_node_increment_counter (vm, ip6_add_from_cache_hbh_node.index,
391 IP6_ADD_FROM_CACHE_HBH_ERROR_PROCESSED,
393 return frame->n_vectors;
/* Registration: internal node reached via the response-side classifier
 * (see file header); nexts are ip6-lookup (success) and error-drop. */
396 VLIB_REGISTER_NODE (ip6_add_from_cache_hbh_node) =
398 .function = ip6_add_from_cache_hbh_node_fn,
399 .name = "ip6-add-from-cache-hop-by-hop",
400 .vector_size = sizeof (u32),
401 .format_trace = format_ip6_add_from_cache_hbh_trace,
402 .type = VLIB_NODE_TYPE_INTERNAL,
403 .n_errors = ARRAY_LEN (ip6_add_from_cache_hbh_error_strings),
404 .error_strings = ip6_add_from_cache_hbh_error_strings,
405 /* See ip/lookup.h */
406 .n_next_nodes = IP6_IOAM_CACHE_INPUT_N_NEXT,
409 #define _(s,n) [IP6_IOAM_CACHE_INPUT_NEXT_##s] = n,
410 foreach_ip6_ioam_cache_input_next
/* Generate per-CPU-architecture variants of the node function and
 * select the best one at runtime. */
416 VLIB_NODE_FUNCTION_MULTIARCH (ip6_add_from_cache_hbh_node,
417 ip6_add_from_cache_hbh_node_fn)
419 * fd.io coding-style-patch-verification: ON
422 * eval: (c-set-style "gnu")