2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #ifndef __included_ioam_cache_h__
16 #define __included_ioam_cache_h__
18 #include <vnet/vnet.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/ip/ip_packet.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/ip/ip6_packet.h>
23 #include <vnet/srv6/sr.h>
25 #include <vppinfra/pool.h>
26 #include <vppinfra/hash.h>
27 #include <vppinfra/error.h>
28 #include <vppinfra/elog.h>
29 #include <vppinfra/bihash_8_8.h>
30 #include <ioam/analyse/ip6/ip6_ioam_analyse.h>
31 #include <vppinfra/tw_timer_16t_2w_512sl.h>
34 * This header contains routines for caching of ioam header and
36 * 1 - On application facing node: to cache ioam header recvd
37 * in request and reattach in response to provide round
38 * trip path visibility. Since request response matching
39 * is needed, this works with TCP and relies on (5 tuples, seq no)
40 * 2 - On M-Anycast server node: This node replicates requests
41 * towards multiple anycast service nodes serving anycast
42 * IP6 address. It evaluates response and forwards the best
43 * response towards the client requesting the service.
44 * Again since request-response matching is needed, works
45 * with TCP and relies on (5 tuples,seq no) for matching.
46 * To do this it caches SYN-ACK responses for a short time to
47 * evaluate multiple responses received before the selected
48 * SYN-ACK response is forwarded and others dropped.
50 * M-Anycast server cache:
51 * - There is a pool of cache entries per worker thread.
52 * - Cache entry is created when SYN is received; expected
53 * number of responses are marked based on number of
54 * SR tunnels for the anycast destination address
55 * - The pool/thread id and pool index are attached in the
56 * message as an ioam option for quick look up.
57 * - When a SYN-ACK is received, the ioam option containing
58 * thread id + pool index of the cache entry is used to
59 * look up cache entry.
60 * - Cache synchronization:
61 * - This is achieved by cache entry add/del/update all handled
62 * by the same worker/main thread
63 * - Packets from client to threads - syn packets, can be distributed
64 * based on incoming interface affinity to the cpu core pinned to
65 * the thread or a simple sequence number based distribution
66 * if thread per interface is not scaling
67 * - Response packets from server towards clients - syn-acks, are
68 * forced to the same thread that created the cache entry
69 * using SR and the destination of SR v6 address assigned
70 * to the core/thread. This address is sent as an ioam option
71 * in the syn that can be then used on the other side to
72 * populate v6 dst address in the response
73 * - Timeout: timer wheel per thread is used to track the syn-ack wait
74 * time. The timer wheel tick is updated via an input node per thread.
76 * Application facing node/Service side cache:
77 * - Single pool of cache entries.
78 * - Cache entry is created when SYN is received. Caches the ioam
79 * header. Hash table entry is created based on 5 tuple and
80 * TCP seq no to pool index
81 * - Response SYN-ACK processed by looking up pool index in hash table
82 * and cache entry in the pool is used to get the ioam header rewrite
83 * string. Entry is freed from pool and hash table after use.
84 * - Locking/Synchronization: Currently this functionality is deployed
85 * with main/single thread only. Hence no locking is used.
86 * - Deployment: A VPP node per application server servicing anycast
87 * address is expected. Locking/synchronization needed when the server
88 * /application facing node is started with multiple worker threads.
93 * Application facing server side caching:
94 * Cache entry for ioam header
95 * Currently caters to TCP and relies on
96 * TCP - 5 tuples + seqno to cache and reinsert
97 * ioam header b/n TCP request response
101 ip6_address_t src_address;
102 ip6_address_t dst_address;
107 ip6_address_t next_hop;
108 u16 my_address_offset;
109 u8 *ioam_rewrite_string;
110 } ioam_cache_entry_t;
113 * Cache entry for anycast server selection
114 * Works for TCP as 5 tuple + sequence number
115 * is required for request response matching
116 * max_responses expected is set based on number
117 * of SR tunnels for the dst_address
118 * Timeout or all response_received = max_responses
119 * will clear the entry
120 * buffer_index index of the response msg vlib buffer
121 * that is currently the best response
127 ip6_address_t src_address;
128 ip6_address_t dst_address;
134 ip6_hop_by_hop_header_t *hbh; //pointer to hbh header in the buffer
136 u8 response_received;
138 u32 stop_timer_handle;
139 /** Handle returned from tw_start_timer */
141 /** entry should expire at this clock tick */
142 u32 expected_to_expire;
143 } ioam_cache_ts_entry_t;
146 * Per thread tunnel selection cache stats
152 } ioam_cache_ts_pool_stats_t;
154 /* Server side: iOAM header caching */
155 #define MAX_CACHE_ENTRIES 4096
156 /* M-Anycast: Cache for SR tunnel selection */
157 #define MAX_CACHE_TS_ENTRIES 1048576
159 #define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS (4 * 1024)
160 #define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE (2<<20)
164 /* API message ID base */
167 /* Pool of ioam_cache_buffer_t */
168 ioam_cache_entry_t *ioam_rewrite_pool;
170 /* For steering packets ioam cache entry is followed by
171 * SR header. This is the SR rewrite template */
172 u8 *sr_rewrite_template;
173 /* The current rewrite string being used */
175 u8 rewrite_pool_index_offset;
176 ip6_address_t sr_localsid_cache;
178 u64 lookup_table_nbuckets;
179 u64 lookup_table_size;
180 clib_bihash_8_8_t ioam_rewrite_cache_table;
182 /* M-Anycast: Pool of ioam_cache_ts_entry_t per thread */
183 ioam_cache_ts_entry_t **ioam_ts_pool;
184 ioam_cache_ts_pool_stats_t *ts_stats;
185 /** per thread single-wheel */
186 tw_timer_wheel_16t_2w_512sl_t *timer_wheels;
189 * Selection criteria: oneway delay: Server to M-Anycast
192 bool criteria_oneway;
193 u8 wait_for_responses;
194 ip6_address_t sr_localsid_ts;
197 vlib_main_t *vlib_main;
199 uword cache_hbh_slot;
201 u32 ip6_hbh_pop_node_index;
202 u32 error_node_index;
203 u32 cleanup_process_node_index;
206 ioam_cache_main_t ioam_cache_main;
208 extern vlib_node_registration_t ioam_cache_node;
209 extern vlib_node_registration_t ioam_cache_ts_node;
211 /* Compute flow hash. We'll use it to select which Sponge to use for this
212 * flow. And other things.
213 * ip6_compute_flow_hash in ip6.h doesn't locate tcp/udp when
214 * ext headers are present. While it could be made to it will be a
215 * performance hit for ECMP flows.
216 * Hence this function here, with L4 information directly input
217 * Useful when tcp/udp headers are already located in presence of
221 ip6_compute_flow_hash_ext (const ip6_header_t * ip,
224 u16 dst_port, flow_hash_config_t flow_hash_config)
229 t1 = (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1]);
230 t1 = (flow_hash_config & IP_FLOW_HASH_SRC_ADDR) ? t1 : 0;
232 t2 = (ip->dst_address.as_u64[0] ^ ip->dst_address.as_u64[1]);
233 t2 = (flow_hash_config & IP_FLOW_HASH_DST_ADDR) ? t2 : 0;
235 a = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t2 : t1;
236 b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t1 : t2;
237 b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? protocol : 0;
242 t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0;
243 t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? t2 : 0;
245 c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
246 ((t1 << 16) | t2) : ((t2 << 16) | t1);
248 hash_mix64 (a, b, c);
253 /* 2 new ioam E2E options :
254 * 1. HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID: IP6 address
255 * of ioam node that inserted ioam header
256 * 2. HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID: Pool id and index
257 * to look up tunnel select cache entry
259 #define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID 30
260 #define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID 31
262 typedef CLIB_PACKED (struct
264 ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 reserved[5];
266 }) ioam_e2e_id_option_t;
268 typedef CLIB_PACKED (struct
270 ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 pool_id;
272 }) ioam_e2e_cache_option_t;
274 #define IOAM_E2E_ID_OPTION_RND ((sizeof(ioam_e2e_id_option_t) + 7) & ~7)
275 #define IOAM_E2E_ID_HBH_EXT_LEN (IOAM_E2E_ID_OPTION_RND >> 3)
276 #define IOAM_E2E_CACHE_OPTION_RND ((sizeof(ioam_e2e_cache_option_t) + 7) & ~7)
277 #define IOAM_E2E_CACHE_HBH_EXT_LEN (IOAM_E2E_CACHE_OPTION_RND >> 3)
280 ioam_e2e_id_rewrite_handler (ioam_e2e_id_option_t * e2e_option,
281 ip6_address_t * address)
283 e2e_option->id.as_u64[0] = address->as_u64[0];
284 e2e_option->id.as_u64[1] = address->as_u64[1];
288 /* Following functions are for the caching of ioam header
289 * to enable reattaching it for a complete request-response
290 * message exchange */
292 ioam_cache_entry_free (ioam_cache_entry_t * entry)
294 ioam_cache_main_t *cm = &ioam_cache_main;
297 vec_free (entry->ioam_rewrite_string);
298 memset (entry, 0, sizeof (*entry));
299 pool_put (cm->ioam_rewrite_pool, entry);
303 inline static ioam_cache_entry_t *
304 ioam_cache_entry_cleanup (u32 pool_index)
306 ioam_cache_main_t *cm = &ioam_cache_main;
307 ioam_cache_entry_t *entry = 0;
309 entry = pool_elt_at_index (cm->ioam_rewrite_pool, pool_index);
310 ioam_cache_entry_free (entry);
314 inline static ioam_cache_entry_t *
315 ioam_cache_lookup (ip6_header_t * ip0, u16 src_port, u16 dst_port, u32 seq_no)
317 ioam_cache_main_t *cm = &ioam_cache_main;
318 u32 flow_hash = ip6_compute_flow_hash_ext (ip0, ip0->protocol,
320 IP_FLOW_HASH_DEFAULT |
321 IP_FLOW_HASH_REVERSE_SRC_DST);
322 clib_bihash_kv_8_8_t kv, value;
324 kv.key = (u64) flow_hash << 32 | seq_no;
329 if (clib_bihash_search_8_8 (&cm->ioam_rewrite_cache_table, &kv, &value) >=
332 ioam_cache_entry_t *entry = 0;
334 entry = pool_elt_at_index (cm->ioam_rewrite_pool, value.value);
336 if (ip6_address_compare (&ip0->src_address, &entry->dst_address) == 0 &&
337 ip6_address_compare (&ip0->dst_address, &entry->src_address) == 0 &&
338 entry->src_port == dst_port &&
339 entry->dst_port == src_port && entry->seq_no == seq_no)
341 /* If lookup is successful remove it from the hash */
342 clib_bihash_add_del_8_8 (&cm->ioam_rewrite_cache_table, &kv, 0);
353 * Caches ioam hbh header
354 * Extends the hbh header with option to contain IP6 address of the node
358 ioam_cache_add (vlib_buffer_t * b0,
361 u16 dst_port, ip6_hop_by_hop_header_t * hbh0, u32 seq_no)
363 ioam_cache_main_t *cm = &ioam_cache_main;
364 ioam_cache_entry_t *entry = 0;
365 u32 rewrite_len = 0, e2e_id_offset = 0;
367 ioam_e2e_id_option_t *e2e = 0;
369 pool_get_aligned (cm->ioam_rewrite_pool, entry, CLIB_CACHE_LINE_BYTES);
370 memset (entry, 0, sizeof (*entry));
371 pool_index = entry - cm->ioam_rewrite_pool;
373 clib_memcpy (entry->dst_address.as_u64, ip0->dst_address.as_u64,
374 sizeof (ip6_address_t));
375 clib_memcpy (entry->src_address.as_u64, ip0->src_address.as_u64,
376 sizeof (ip6_address_t));
377 entry->src_port = src_port;
378 entry->dst_port = dst_port;
379 entry->seq_no = seq_no;
380 rewrite_len = ((hbh0->length + 1) << 3);
381 vec_validate (entry->ioam_rewrite_string, rewrite_len - 1);
382 e2e = ip6_ioam_find_hbh_option (hbh0, HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID);
385 entry->next_hop.as_u64[0] = e2e->id.as_u64[0];
386 entry->next_hop.as_u64[1] = e2e->id.as_u64[1];
392 e2e_id_offset = (u8 *) e2e - (u8 *) hbh0;
393 /* setup e2e id option to insert v6 address of the node caching it */
394 clib_memcpy (entry->ioam_rewrite_string, hbh0, rewrite_len);
395 hbh0 = (ip6_hop_by_hop_header_t *) entry->ioam_rewrite_string;
397 /* suffix rewrite string with e2e ID option */
398 e2e = (ioam_e2e_id_option_t *) (entry->ioam_rewrite_string + e2e_id_offset);
399 ioam_e2e_id_rewrite_handler (e2e, &cm->sr_localsid_cache);
400 entry->my_address_offset = (u8 *) (&e2e->id) - (u8 *) hbh0;
402 /* add it to hash, replacing and freeing any collision for now */
404 ip6_compute_flow_hash_ext (ip0, hbh0->protocol, src_port, dst_port,
405 IP_FLOW_HASH_DEFAULT);
406 clib_bihash_kv_8_8_t kv, value;
407 kv.key = (u64) flow_hash << 32 | seq_no;
409 if (clib_bihash_search_8_8 (&cm->ioam_rewrite_cache_table, &kv, &value) >=
413 ioam_cache_entry_cleanup (value.value);
415 kv.value = pool_index;
416 clib_bihash_add_del_8_8 (&cm->ioam_rewrite_cache_table, &kv, 1);
420 /* Creates SR rewrite string
421 * This is appended with ioam header on the server facing
423 * This SR header is necessary to attract packets towards
424 * selected Anycast server.
427 ioam_cache_sr_rewrite_template_create (void)
429 ioam_cache_main_t *cm = &ioam_cache_main;
430 ip6_address_t *segments = 0;
431 ip6_address_t *this_seg = 0;
433 /* This nodes address and the original dest will be
434 * filled when the packet is processed */
435 vec_add2 (segments, this_seg, 1);
436 memset (this_seg, 0xfe, sizeof (ip6_address_t));
437 cm->sr_rewrite_template = ip6_sr_compute_rewrite_string_insert (segments);
442 ioam_cache_table_init (vlib_main_t * vm)
444 ioam_cache_main_t *cm = &ioam_cache_main;
446 pool_alloc_aligned (cm->ioam_rewrite_pool,
447 MAX_CACHE_ENTRIES, CLIB_CACHE_LINE_BYTES);
448 cm->lookup_table_nbuckets = IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS;
449 cm->lookup_table_nbuckets = 1 << max_log2 (cm->lookup_table_nbuckets);
450 cm->lookup_table_size = IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE;
452 clib_bihash_init_8_8 (&cm->ioam_rewrite_cache_table,
453 "ioam rewrite cache table",
454 cm->lookup_table_nbuckets, cm->lookup_table_size);
455 /* Create SR rewrite template */
456 ioam_cache_sr_rewrite_template_create ();
461 ioam_cache_table_destroy (vlib_main_t * vm)
463 ioam_cache_main_t *cm = &ioam_cache_main;
464 ioam_cache_entry_t *entry = 0;
465 /* free pool and hash table */
466 clib_bihash_free_8_8 (&cm->ioam_rewrite_cache_table);
467 pool_foreach (entry, cm->ioam_rewrite_pool, (
469 ioam_cache_entry_free (entry);
471 pool_free (cm->ioam_rewrite_pool);
472 cm->ioam_rewrite_pool = 0;
473 vec_free (cm->sr_rewrite_template);
474 cm->sr_rewrite_template = 0;
479 format_ioam_cache_entry (u8 * s, va_list * args)
481 ioam_cache_entry_t *e = va_arg (*args, ioam_cache_entry_t *);
482 ioam_cache_main_t *cm = &ioam_cache_main;
483 int rewrite_len = vec_len (e->ioam_rewrite_string);
485 s = format (s, "%d: %U:%d to %U:%d seq_no %lu\n",
486 (e - cm->ioam_rewrite_pool),
487 format_ip6_address, &e->src_address,
489 format_ip6_address, &e->dst_address, e->dst_port, e->seq_no);
493 s = format (s, " %U",
494 format_ip6_hop_by_hop_ext_hdr,
495 (ip6_hop_by_hop_header_t *) e->ioam_rewrite_string,
501 void ioam_cache_ts_timer_node_enable (vlib_main_t * vm, u8 enable);
503 #define IOAM_CACHE_TS_TIMEOUT 1.0 //SYN timeout 1 sec
504 #define IOAM_CACHE_TS_TICK 100e-3
505 /* Timer delays as multiples of 100ms */
506 #define IOAM_CACHE_TS_TIMEOUT_TICKS IOAM_CACHE_TS_TICK*9
507 #define TIMER_HANDLE_INVALID ((u32) ~0)
510 void expired_cache_ts_timer_callback (u32 * expired_timers);
513 * Following functions are to manage M-Anycast server selection
515 * There is a per worker thread pool to create a cache entry
516 * for a TCP SYN received. TCP SYN-ACK contains ioam header
517 * with HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID option to point to the
521 ioam_cache_ts_table_init (vlib_main_t * vm)
523 ioam_cache_main_t *cm = &ioam_cache_main;
524 int no_of_threads = vec_len (vlib_worker_threads);
527 vec_validate_aligned (cm->ioam_ts_pool, no_of_threads - 1,
528 CLIB_CACHE_LINE_BYTES);
529 vec_validate_aligned (cm->ts_stats, no_of_threads - 1,
530 CLIB_CACHE_LINE_BYTES);
531 vec_validate_aligned (cm->timer_wheels, no_of_threads - 1,
532 CLIB_CACHE_LINE_BYTES);
533 cm->lookup_table_nbuckets = IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS;
534 cm->lookup_table_nbuckets = 1 << max_log2 (cm->lookup_table_nbuckets);
535 cm->lookup_table_size = IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE;
536 for (i = 0; i < no_of_threads; i++)
538 pool_alloc_aligned (cm->ioam_ts_pool[i],
539 MAX_CACHE_TS_ENTRIES, CLIB_CACHE_LINE_BYTES);
540 memset (&cm->ts_stats[i], 0, sizeof (ioam_cache_ts_pool_stats_t));
541 tw_timer_wheel_init_16t_2w_512sl (&cm->timer_wheels[i],
542 expired_cache_ts_timer_callback,
544 /* timer period 100ms */ ,
546 cm->timer_wheels[i].last_run_time = vlib_time_now (vm);
548 ioam_cache_ts_timer_node_enable (vm, 1);
553 ioam_cache_ts_timer_set (ioam_cache_main_t * cm,
554 ioam_cache_ts_entry_t * entry, u32 interval)
557 = tw_timer_start_16t_2w_512sl (&cm->timer_wheels[entry->pool_id],
558 entry->pool_index, 1, interval);
562 ioam_cache_ts_timer_reset (ioam_cache_main_t * cm,
563 ioam_cache_ts_entry_t * entry)
565 tw_timer_stop_16t_2w_512sl (&cm->timer_wheels[entry->pool_id],
566 entry->timer_handle);
567 entry->timer_handle = TIMER_HANDLE_INVALID;
571 ioam_cache_ts_entry_free (u32 thread_id,
572 ioam_cache_ts_entry_t * entry, u32 node_index)
574 ioam_cache_main_t *cm = &ioam_cache_main;
575 vlib_main_t *vm = cm->vlib_main;
576 vlib_frame_t *nf = 0;
583 nf = vlib_get_frame_to_node (vm, node_index);
585 to_next = vlib_frame_vector_args (nf);
587 to_next[0] = entry->buffer_index;
588 vlib_put_frame_to_node (vm, node_index, nf);
590 pool_put (cm->ioam_ts_pool[thread_id], entry);
591 cm->ts_stats[thread_id].inuse--;
592 memset (entry, 0, sizeof (*entry));
597 ioam_cache_ts_table_destroy (vlib_main_t * vm)
599 ioam_cache_main_t *cm = &ioam_cache_main;
600 ioam_cache_ts_entry_t *entry = 0;
601 int no_of_threads = vec_len (vlib_worker_threads);
604 /* free pool and hash table */
605 for (i = 0; i < no_of_threads; i++)
607 pool_foreach (entry, cm->ioam_ts_pool[i], (
609 ioam_cache_ts_entry_free (i,
611 cm->error_node_index);
614 pool_free (cm->ioam_ts_pool[i]);
615 cm->ioam_ts_pool = 0;
616 tw_timer_wheel_free_16t_2w_512sl (&cm->timer_wheels[i]);
618 vec_free (cm->ioam_ts_pool);
623 ioam_cache_ts_entry_cleanup (u32 thread_id, u32 pool_index)
625 ioam_cache_main_t *cm = &ioam_cache_main;
626 ioam_cache_ts_entry_t *entry = 0;
628 entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
629 ioam_cache_ts_entry_free (thread_id, entry, cm->error_node_index);
634 * Caches buffer for ioam SR tunnel select for Anycast service
637 ioam_cache_ts_add (ip6_header_t * ip0,
641 u8 max_responses, u64 now, u32 thread_id, u32 * pool_index)
643 ioam_cache_main_t *cm = &ioam_cache_main;
644 ioam_cache_ts_entry_t *entry = 0;
646 if (cm->ts_stats[thread_id].inuse == MAX_CACHE_TS_ENTRIES)
648 cm->ts_stats[thread_id].add_failed++;
652 pool_get_aligned (cm->ioam_ts_pool[thread_id], entry,
653 CLIB_CACHE_LINE_BYTES);
654 memset (entry, 0, sizeof (*entry));
655 *pool_index = entry - cm->ioam_ts_pool[thread_id];
657 clib_memcpy (entry->dst_address.as_u64, ip0->dst_address.as_u64,
658 sizeof (ip6_address_t));
659 clib_memcpy (entry->src_address.as_u64, ip0->src_address.as_u64,
660 sizeof (ip6_address_t));
661 entry->src_port = src_port;
662 entry->dst_port = dst_port;
663 entry->seq_no = seq_no;
664 entry->response_received = 0;
665 entry->max_responses = max_responses;
666 entry->created_at = now;
668 entry->buffer_index = 0;
669 entry->pool_id = thread_id;
670 entry->pool_index = *pool_index;
671 ioam_cache_ts_timer_set (cm, entry, IOAM_CACHE_TS_TIMEOUT);
672 cm->ts_stats[thread_id].inuse++;
677 ioam_cache_ts_send (u32 thread_id, i32 pool_index)
679 ioam_cache_main_t *cm = &ioam_cache_main;
680 ioam_cache_ts_entry_t *entry = 0;
682 entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
683 if (!pool_is_free (cm->ioam_ts_pool[thread_id], entry) && entry)
685 /* send and free pool entry */
686 ioam_cache_ts_entry_free (thread_id, entry, cm->ip6_hbh_pop_node_index);
691 ioam_cache_ts_check_and_send (u32 thread_id, i32 pool_index)
693 ioam_cache_main_t *cm = &ioam_cache_main;
694 ioam_cache_ts_entry_t *entry = 0;
695 entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
696 if (entry && entry->hbh)
698 if (entry->response_received == entry->max_responses ||
699 entry->created_at + IOAM_CACHE_TS_TIMEOUT <=
700 vlib_time_now (cm->vlib_main))
702 ioam_cache_ts_timer_reset (cm, entry);
703 ioam_cache_ts_send (thread_id, pool_index);
709 ioam_cache_ts_update (u32 thread_id,
711 u32 buffer_index, ip6_hop_by_hop_header_t * hbh)
713 ioam_cache_main_t *cm = &ioam_cache_main;
714 ioam_cache_ts_entry_t *entry = 0;
715 vlib_main_t *vm = cm->vlib_main;
716 vlib_frame_t *nf = 0;
719 entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
720 if (!pool_is_free (cm->ioam_ts_pool[thread_id], entry) && entry)
722 /* drop existing buffer */
725 nf = vlib_get_frame_to_node (vm, cm->error_node_index);
727 to_next = vlib_frame_vector_args (nf);
729 to_next[0] = entry->buffer_index;
730 vlib_put_frame_to_node (vm, cm->error_node_index, nf);
733 entry->buffer_index = buffer_index;
736 ioam_cache_ts_check_and_send (thread_id, pool_index);
743 * looks up the entry based on the e2e option pool index
744 * result = 0 found the entry
745 * result < 0 indicates failure to find an entry
748 ioam_cache_ts_lookup (ip6_header_t * ip0,
753 ip6_hop_by_hop_header_t ** hbh,
754 u32 * pool_index, u8 * thread_id, u8 response_seen)
756 ioam_cache_main_t *cm = &ioam_cache_main;
757 ip6_hop_by_hop_header_t *hbh0 = 0;
758 ioam_e2e_cache_option_t *e2e = 0;
760 hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
762 (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset);
763 if ((u8 *) e2e < ((u8 *) hbh0 + ((hbh0->length + 1) << 3))
764 && e2e->hdr.type == HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID)
766 ioam_cache_ts_entry_t *entry = 0;
767 *pool_index = e2e->pool_index;
768 *thread_id = e2e->pool_id;
769 entry = pool_elt_at_index (cm->ioam_ts_pool[*thread_id], *pool_index);
772 ip6_address_compare (&ip0->src_address, &entry->dst_address) == 0 &&
773 ip6_address_compare (&ip0->dst_address, &entry->src_address) == 0 &&
774 entry->src_port == dst_port &&
775 entry->dst_port == src_port && entry->seq_no == seq_no)
778 entry->response_received += response_seen;
790 format_ioam_cache_ts_entry (u8 * s, va_list * args)
792 ioam_cache_ts_entry_t *e = va_arg (*args, ioam_cache_ts_entry_t *);
793 u32 thread_id = va_arg (*args, u32);
794 ioam_cache_main_t *cm = &ioam_cache_main;
795 ioam_e2e_id_option_t *e2e = 0;
796 vlib_main_t *vm = cm->vlib_main;
797 clib_time_t *ct = &vm->clib_time;
805 ip6_ioam_find_hbh_option (e->hbh,
806 HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID);
810 "%d: %U:%d to %U:%d seq_no %u buffer %u %U \n\t\tCreated at %U Received %d\n",
811 (e - cm->ioam_ts_pool[thread_id]), format_ip6_address,
812 &e->src_address, e->src_port, format_ip6_address,
813 &e->dst_address, e->dst_port, e->seq_no, e->buffer_index,
814 format_ip6_address, e2e ? &e2e->id : 0, format_time_interval,
817 vm->cpu_time_main_loop_start) * ct->seconds_per_clock,
818 e->response_received);
824 "%d: %U:%d to %U:%d seq_no %u Buffer %u \n\t\tCreated at %U Received %d\n",
825 (e - cm->ioam_ts_pool[thread_id]), format_ip6_address,
826 &e->src_address, e->src_port, format_ip6_address,
827 &e->dst_address, e->dst_port, e->seq_no, e->buffer_index,
828 format_time_interval, "h:m:s:u",
830 vm->cpu_time_main_loop_start) * ct->seconds_per_clock,
831 e->response_received);
839 * Get extended rewrite string for iOAM data in v6
840 * This makes space for an e2e options to carry cache pool info
841 * and manycast server address.
842 * It sets the rewrite string per configs in ioam ip6 + new option
843 * for cache along with offset to the option to populate cache
847 ip6_ioam_ts_cache_set_rewrite (void)
849 ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
850 ioam_cache_main_t *cm = &ioam_cache_main;
851 ip6_hop_by_hop_header_t *hbh;
853 ioam_e2e_cache_option_t *e2e = 0;
854 ioam_e2e_id_option_t *e2e_id = 0;
856 vec_free (cm->rewrite);
857 ip6_ioam_set_rewrite (&(cm->rewrite), hm->has_trace_option,
858 hm->has_pot_option, hm->has_seqno_option);
859 hbh = (ip6_hop_by_hop_header_t *) cm->rewrite;
860 rewrite_len = ((hbh->length + 1) << 3);
861 vec_validate (cm->rewrite,
862 rewrite_len - 1 + IOAM_E2E_CACHE_OPTION_RND +
863 IOAM_E2E_ID_OPTION_RND);
864 hbh = (ip6_hop_by_hop_header_t *) cm->rewrite;
865 /* setup e2e id option to insert pool id and index of the node caching it */
866 hbh->length += IOAM_E2E_CACHE_HBH_EXT_LEN + IOAM_E2E_ID_HBH_EXT_LEN;
867 cm->rewrite_pool_index_offset = rewrite_len;
868 e2e = (ioam_e2e_cache_option_t *) (cm->rewrite + rewrite_len);
869 e2e->hdr.type = HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID
870 | HBH_OPTION_TYPE_SKIP_UNKNOWN;
871 e2e->hdr.length = sizeof (ioam_e2e_cache_option_t) -
872 sizeof (ip6_hop_by_hop_option_t);
875 (ioam_e2e_id_option_t *) ((u8 *) e2e + sizeof (ioam_e2e_cache_option_t));
877 HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID | HBH_OPTION_TYPE_SKIP_UNKNOWN;
879 sizeof (ioam_e2e_id_option_t) - sizeof (ip6_hop_by_hop_option_t);
880 e2e_id->e2e_type = 1;
886 ip6_ioam_ts_cache_cleanup_rewrite (void)
888 ioam_cache_main_t *cm = &ioam_cache_main;
890 vec_free (cm->rewrite);
892 cm->rewrite_pool_index_offset = 0;
895 #endif /* __included_ioam_cache_h__ */
898 * fd.io coding-style-patch-verification: ON
901 * eval: (c-set-style "gnu")