/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef PLUGINS_IOAM_PLUGIN_IOAM_ANALYSE_IOAM_ANALYSE_H_
17 #define PLUGINS_IOAM_PLUGIN_IOAM_ANALYSE_IOAM_ANALYSE_H_
19 #include <vlib/vlib.h>
20 #include <vnet/vnet.h>
21 #include <vppinfra/types.h>
22 #include <ioam/lib-e2e/e2e_util.h>
23 #include <ioam/lib-trace/trace_util.h>
24 #include <ioam/lib-trace/trace_config.h>
26 #define IOAM_FLOW_TEMPLATE_ID 260
27 #define IOAM_TRACE_MAX_NODES 10
28 #define IOAM_MAX_PATHS_PER_FLOW 10
/** @brief Analysed iOAM trace data (one record per observed path).
    NOTE(review): the typedef opener and several scalar field declarations
    (node count, trace type, is_free flag, counters, delay fields) appear to
    be elided from this chunk; only the visible members are shown. */
  /** No of nodes in path. */
  /** Data contained in trace - NodeId, TTL, Ingress & Egress Link, Timestamp. */
  /** Flag to indicate whether node is allocated. */
  /** Actual PATH the flow has taken. */
  ioam_path_map_t path[IOAM_TRACE_MAX_NODES];
  /** Num of pkts in the flow going over path. */
  /** Num of bytes in the flow going over path. */
  /** Minimum delay for the flow. */
  /** Maximum delay for the flow. */
  /** Average delay for the flow. */
} ioam_analyse_trace_record;

  /** Per-path analysed records: a single flow may be seen over up to
      IOAM_MAX_PATHS_PER_FLOW distinct paths. */
  ioam_analyse_trace_record path_data[IOAM_MAX_PATHS_PER_FLOW];
} ioam_analyse_trace_data;
/** @brief Analysed iOAM POT (proof-of-transit) data.
    NOTE(review): the typedef opener appears elided from this chunk. */
  /** Number of packets validated (passed through the service chain)
      within the timestamps. */
  u32 sfc_validated_count;

  /** Number of packets invalidated (failed through the service chain)
      within the timestamps. */
  u32 sfc_invalidated_count;
} ioam_analyse_pot_data;
/** @brief Analysed iOAM data — per-flow aggregate of trace/POT/seqno results. */
typedef struct ioam_analyser_data_t_
  /* NOTE(review): the opening brace and a few leading fields appear elided
     from this chunk. */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);

  /** Num of pkts sent for this flow. */
  /** Num of pkts matching this flow. */
  /** Num of bytes matching this flow. */
  /** Analysed iOAM trace data. */
  ioam_analyse_trace_data trace_data;

  /** Analysed iOAM pot data. */
  ioam_analyse_pot_data pot_data;

  /** Analysed iOAM seqno data. */
  seqno_rx_info seqno_data;

  /** Cache of previously analysed data, useful for export.
      (Spelling "chached" is pre-existing in this public identifier;
      renaming would break external users of the header.) */
  struct ioam_analyser_data_t_ *chached_data_list;

  /** Spin lock: taken by analysis writers and by the exporter thread
      reading this data from another thread. */
  volatile u32 *writer_lock;
} ioam_analyser_data_t;
/**
 * @brief Compute the delay covered by an iOAM trace option.
 *
 * Derives the number of filled node entries from the per-node element size
 * and the trace length, then returns (end timestamp - start timestamp) as
 * read from the first and last filled trace elements.
 *
 * NOTE(review): this chunk has lines elided (return-type line, braces,
 * early-return bodies, the `end_elt =` assignment head, the `oneway`
 * parameter/`done`/`num_nodes` declarations); comments below describe only
 * the visible logic — confirm against the full file.
 */
ip6_ioam_analyse_calc_delay (ioam_trace_hdr_t * trace, u16 trace_len,
  u16 size_of_all_traceopts;
  u8 size_of_traceopt_per_node;
  u32 *start_elt, *end_elt, *uturn_elt;;	/* NOTE(review): stray second ';' */
  u32 start_time, end_time;

  /* Element size per node is a function of the trace type bitmap. */
  size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type);
  // Unknown trace type
  if (size_of_traceopt_per_node == 0)
  size_of_all_traceopts = trace_len;	/*ioam_trace_type,data_list_elts_left */

  /* Total slots = total bytes / per-node size. */
  num_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node);
  if ((num_nodes == 0) || (num_nodes <= trace->data_list_elts_left))

  /* data_list_elts_left counts still-unused slots; subtract to get the
     number of nodes that actually wrote data. */
  num_nodes -= trace->data_list_elts_left;

  start_elt = trace->elts;
    (u32) ((size_of_traceopt_per_node / sizeof (u32)) * (num_nodes - 1));

  if (oneway && (trace->ioam_trace_type & BIT_TTL_NODEID))
      /* Step one node-element back from the current position. */
      uturn_elt = start_elt - size_of_traceopt_per_node / sizeof (u32);

      /* Top byte of a TTL/NodeID element is the hop limit (network order);
         a non-increasing hop limit marks the turn-around point. */
      if ((clib_net_to_host_u32 (*start_elt) >> 24) <=
	  (clib_net_to_host_u32 (*uturn_elt) >> 24))
      while (!done && (start_elt = uturn_elt) != end_elt);

  /* Skip the TTL/NodeID and interface words to reach the timestamp
     (presumably — elided advance statements; verify in full source). */
  if (trace->ioam_trace_type & BIT_TTL_NODEID)
  if (trace->ioam_trace_type & BIT_ING_INTERFACE)

  /* Timestamps are carried in network byte order. */
  start_time = clib_net_to_host_u32 (*start_elt);
  end_time = clib_net_to_host_u32 (*end_elt);
  return (f64) (end_time - start_time);
/**
 * @brief Mark every hop of every recorded path of this flow as down.
 *
 * NOTE(review): return-type line, braces and the `continue` for free slots
 * appear elided from this chunk.
 */
ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
  ioam_analyse_trace_data *trace_data;
  ioam_analyse_trace_record *trace_record;
  ioam_path_map_t *path;

  /* Spin until the writer lock is ours (exporter thread may hold it). */
  while (__sync_lock_test_and_set (data->writer_lock, 1))

  trace_data = &data->trace_data;

  for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++)
      trace_record = trace_data->path_data + i;

      /* Unused path slot — nothing to mark. */
      if (trace_record->is_free)

      path = trace_record->path;

      for (k = 0; k < trace_record->num_nodes; k++)
	path[k].state_up = 0;
  /* Release the writer lock. */
  *(data->writer_lock) = 0;
/**
 * @brief Match a looped-back trace against the recorded paths of a flow
 *        and mark the matching hops as up.
 *
 * @param data      per-flow analyser record
 * @param trace     hop-by-hop trace header from the looped-back packet
 * @param trace_len trace data length in bytes
 *
 * NOTE(review): return type, braces, `continue`/`break` statements and the
 * pointer-advance statements between fields appear elided from this chunk.
 */
ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
				     ioam_trace_hdr_t * trace, u16 trace_len)
  ioam_analyse_trace_data *trace_data;
  ioam_analyse_trace_record *trace_record;
  ioam_path_map_t *path;
  u8 i, j, k, num_nodes, max_nodes;
  u16 ingress_if, egress_if;
  u16 size_of_traceopt_per_node;
  u16 size_of_all_traceopts;

  /* Take the writer lock — exporter thread may be reading. */
  while (__sync_lock_test_and_set (data->writer_lock, 1))

  trace_data = &data->trace_data;

  size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type);
  /* Unknown trace type — bail out (body elided). */
  if (0 == size_of_traceopt_per_node)

  size_of_all_traceopts = trace_len;

  ptr = (u8 *) trace->elts;
  max_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node);
  num_nodes = max_nodes - trace->data_list_elts_left;

  /* Compare the trace against each recorded path of the flow. */
  for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++)
      trace_record = trace_data->path_data + i;
      path = trace_record->path;

      /* Skip unused path slots. */
      if (trace_record->is_free)

      /* Trace elements are filled from the end: read them back-to-front
         (j counts down) while the stored path is walked front-to-back. */
      for (j = max_nodes, k = 0; k < num_nodes; j--, k++)
	  (u8 *) ((u8 *) trace->elts +
		  (size_of_traceopt_per_node * (j - 1)));

	  /* Low 24 bits of the first word hold the node id. */
	  nodeid = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff;

	  if (nodeid != path[k].node_id)

	  if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) ||
	      (trace->ioam_trace_type == TRACE_TYPE_IF))
	      ingress_if = clib_net_to_host_u16 (*((u16 *) ptr));
	      egress_if = clib_net_to_host_u16 (*((u16 *) ptr));
	      if ((ingress_if != path[k].ingress_if) ||
		  (egress_if != path[k].egress_if))

      /* Found Match - set path hop state to up */
      path[k].state_up = 1;

  /* Release the writer lock. */
  *(data->writer_lock) = 0;
/**
 * @brief Analyse the hop-by-hop trace option of a received packet.
 *
 * Tries to match the packet's path against an already-recorded path of the
 * flow; if no record matches, a free slot is initialised with this path.
 * Packet/byte counters and (when timestamps are present) min/max/mean delay
 * are updated on the matched or newly created record.
 *
 * @param data    per-flow analyser record
 * @param trace   hop-by-hop trace header
 * @param pak_len packet length, added to the record's byte counter
 *
 * NOTE(review): return type, a trailing parameter (trace_len is used but
 * its declaration is not visible), braces, `continue`/`goto`-style control
 * statements and pointer-advance statements appear elided from this chunk.
 */
ip6_ioam_analyse_hbh_trace (ioam_analyser_data_t * data,
			    ioam_trace_hdr_t * trace, u16 pak_len,
  ioam_analyse_trace_data *trace_data;
  u16 size_of_traceopt_per_node;
  u16 size_of_all_traceopts;
  u8 i, j, k, num_nodes, max_nodes;
  u16 ingress_if, egress_if;
  ioam_path_map_t *path = NULL;
  ioam_analyse_trace_record *trace_record;

  /* Take the writer lock — exporter thread may be reading. */
  while (__sync_lock_test_and_set (data->writer_lock, 1))

  trace_data = &data->trace_data;

  size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type);
  // Unknown trace type
  if (size_of_traceopt_per_node == 0)
  size_of_all_traceopts = trace_len;

  ptr = (u8 *) trace->elts;
  max_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node);
  num_nodes = max_nodes - trace->data_list_elts_left;

  /* Pass 1: look for an existing record with the same node count, trace
     type, node ids and (if traced) interfaces. */
  for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++)
      trace_record = trace_data->path_data + i;

      if (trace_record->is_free ||
	  (num_nodes != trace_record->num_nodes) ||
	  (trace->ioam_trace_type != trace_record->trace_type))

      path = trace_record->path;

      /* Trace elements are read back-to-front (j counts down) while the
         stored path is walked front-to-back. */
      for (j = max_nodes, k = 0; k < num_nodes; j--, k++)
	  (u8 *) ((u8 *) trace->elts +
		  (size_of_traceopt_per_node * (j - 1)));

	  /* Low 24 bits of the first word hold the node id. */
	  nodeid = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff;

	  if (nodeid != path[k].node_id)

	  if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) ||
	      (trace->ioam_trace_type == TRACE_TYPE_IF))
	      ingress_if = clib_net_to_host_u16 (*((u16 *) ptr));
	      egress_if = clib_net_to_host_u16 (*((u16 *) ptr));
	      if ((ingress_if != path[k].ingress_if) ||
		  (egress_if != path[k].egress_if))

  /* Pass 2: no match found — claim the first free slot and record this
     path in it, resetting its counters and delay statistics. */
  for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++)
      trace_record = trace_data->path_data + i;
      if (trace_record->is_free)
	  trace_record->is_free = 0;
	  trace_record->num_nodes = num_nodes;
	  trace_record->trace_type = trace->ioam_trace_type;
	  path = trace_data->path_data[i].path;
	  trace_record->pkt_counter = 0;
	  trace_record->bytes_counter = 0;
	  /* Sentinel so the first measured delay becomes the minimum. */
	  trace_record->min_delay = 0xFFFFFFFF;
	  trace_record->max_delay = 0;
	  trace_record->mean_delay = 0;

  /* Copy node ids (and interfaces, when traced) into the new record. */
  for (j = max_nodes, k = 0; k < num_nodes; j--, k++)
      (u8 *) ((u8 *) trace->elts + (size_of_traceopt_per_node * (j - 1)));

      path[k].node_id = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff;

      if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) ||
	  (trace->ioam_trace_type == TRACE_TYPE_IF))
	  path[k].ingress_if = clib_net_to_host_u16 (*((u16 *) ptr));
	  path[k].egress_if = clib_net_to_host_u16 (*((u16 *) ptr));

  /* Set path state to UP */
  for (k = 0; k < num_nodes; k++)
    path[k].state_up = 1;

  trace_record->pkt_counter++;
  trace_record->bytes_counter += pak_len;
  if (trace->ioam_trace_type & BIT_TIMESTAMP)
      /* Calculate time delay */
      u32 delay = (u32) ip6_ioam_analyse_calc_delay (trace, trace_len, 0);
      /* NOTE(review): the else-if means a single sample never updates both
         bounds; with min_delay seeded to 0xFFFFFFFF the first sample only
         sets min, leaving max at 0 — confirm intended in full source. */
      if (delay < trace_record->min_delay)
	trace_record->min_delay = delay;
      else if (delay > trace_record->max_delay)
	trace_record->max_delay = delay;

      /* Incremental mean over rx_packets + the current packet. */
      u64 sum = (trace_record->mean_delay * data->seqno_data.rx_packets);
      trace_record->mean_delay =
	(u32) ((sum + delay) / (data->seqno_data.rx_packets + 1));

  /* Release the writer lock. */
  *(data->writer_lock) = 0;
/**
 * @brief Analyse the end-to-end (sequence-number) option of a packet.
 *
 * Feeds the packet's e2e data, converted to host order, into the seqno
 * analyser under the flow's writer lock.
 *
 * @param data per-flow analyser record
 * @param e2e  end-to-end option from the packet
 * @param len  option length (unused in the visible code)
 *
 * NOTE(review): return type and braces appear elided from this chunk.
 */
ip6_ioam_analyse_hbh_e2e (ioam_analyser_data_t * data,
			  ioam_e2e_packet_t * e2e, u16 len)
  while (__sync_lock_test_and_set (data->writer_lock, 1))

  ioam_analyze_seqno (&data->seqno_data,
		      (u64) clib_net_to_host_u32 (e2e->e2e_data));

  /* Release the writer lock. */
  *(data->writer_lock) = 0;
/**
 * @brief vppinfra format callback: render a path map as one line per hop.
 *
 * va_args: (ioam_path_map_t *pm, u32 num_of_elts).
 *
 * NOTE(review): return type, braces, the `s = format (s,` head of the call
 * and the `pm++` advance appear elided from this chunk.
 */
format_path_map (u8 * s, va_list * args)
  ioam_path_map_t *pm = va_arg (*args, ioam_path_map_t *);
  u32 num_of_elts = va_arg (*args, u32);

  for (i = 0; i < num_of_elts; i++)
	  "node_id: 0x%x, ingress_if: 0x%x, egress_if:0x%x, state:%s\n",
	  pm->node_id, pm->ingress_if, pm->egress_if,
	  pm->state_up ? "UP" : "DOWN");
/**
 * @brief Render a full analyser record (trace, POT and seqno sections)
 *        into a vppinfra format string.
 *
 * @param s      format string being built (returned extended)
 * @param record analyser record to print
 *
 * NOTE(review): return type, braces, `continue` for free slots and the
 * head of the seqno `format` call appear elided from this chunk.
 */
print_analyse_flow (u8 * s, ioam_analyser_data_t * record)
  ioam_analyse_trace_record *trace_record;

  s = format (s, "pkt_sent : %u\n", record->pkt_sent);
  s = format (s, "pkt_counter : %u\n", record->pkt_counter);
  s = format (s, "bytes_counter : %u\n", record->bytes_counter);

  s = format (s, "Trace data: \n");

  /* One section per recorded path; free slots are skipped. */
  for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++)
      trace_record = record->trace_data.path_data + j;
      if (trace_record->is_free)

      s = format (s, "path_map:\n%U", format_path_map,
		  trace_record->path, trace_record->num_nodes);
      s = format (s, "pkt_counter: %u\n", trace_record->pkt_counter);
      s = format (s, "bytes_counter: %u\n", trace_record->bytes_counter);

      s = format (s, "min_delay: %u\n", trace_record->min_delay);
      s = format (s, "max_delay: %u\n", trace_record->max_delay);
      s = format (s, "mean_delay: %u\n", trace_record->mean_delay);

  s = format (s, "\nPOT data: \n");
  s = format (s, "sfc_validated_count : %u\n",
	      record->pot_data.sfc_validated_count);
  s = format (s, "sfc_invalidated_count : %u\n",
	      record->pot_data.sfc_invalidated_count);

  s = format (s, "\nSeqno Data:\n");
	      "Lost Packets : %lu\n"
	      "Duplicate Packets : %lu\n"
	      "Reordered Packets : %lu\n",
	      record->seqno_data.rx_packets,
	      record->seqno_data.lost_packets,
	      record->seqno_data.dup_packets,
	      record->seqno_data.reordered_packets);

  s = format (s, "\n");
/**
 * @brief Initialise a per-flow analyser record.
 *
 * Allocates the export cache and the cache-line-aligned writer lock, and
 * marks every per-path trace slot free.
 *
 * NOTE(review): return type, braces and a probable zeroing of *data appear
 * elided from this chunk.
 */
ioam_analyse_init_data (ioam_analyser_data_t * data)
  ioam_analyse_trace_data *trace_data;

  /* We maintain data corresponding to last IP-Fix export, this may
   * get extended in future to maintain history of data */
  vec_validate_aligned (data->chached_data_list, 0, CLIB_CACHE_LINE_BYTES);

  /* Lock lives in its own aligned allocation; 0 = unlocked. */
  data->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
					      CLIB_CACHE_LINE_BYTES);
  *(data->writer_lock) = 0;

  trace_data = &(data->trace_data);
  for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++)
    trace_data->path_data[j].is_free = 1;
520 #endif /* PLUGINS_IOAM_PLUGIN_IOAM_ANALYSE_IOAM_ANALYSE_H_ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */