From: Shwetha Bhandari Date: Wed, 4 May 2016 06:12:57 +0000 (+0200) Subject: VPP-44: iOAM service and path verification using shamir's secret sharing X-Git-Tag: v16.06-rc1~33 X-Git-Url: https://gerrit.fd.io/r/gitweb?a=commitdiff_plain;h=05866a16c093124e4083d68c7f21f6f1d019959b;p=vpp.git VPP-44: iOAM service and path verification using shamir's secret sharing Change-Id: I445ad13f8f93cb75cacc94192c4ae85c8ca14e35 Signed-off-by: Shwetha Bhandari --- diff --git a/vnet/Makefile.am b/vnet/Makefile.am index 9393c1a8772..1239a8b8df4 100644 --- a/vnet/Makefile.am +++ b/vnet/Makefile.am @@ -733,6 +733,16 @@ libvnetplugin_la_SOURCES += \ nobase_include_HEADERS += \ vnet/plugin/plugin.h +######################################## +# Service Chain verification util +######################################## +libvnet_la_SOURCES += \ + vnet/lib-scv/scv_util.c + +nobase_include_HEADERS += \ + vnet/lib-scv/scv_util.h \ + vnet/lib-scv/math64.h + lib_LTLIBRARIES = libvnet.la libvnetplugin.la dpdk_libs = diff --git a/vnet/vnet/ip/ip6_hop_by_hop.c b/vnet/vnet/ip/ip6_hop_by_hop.c index 2282e8cae42..16650dde9b2 100644 --- a/vnet/vnet/ip/ip6_hop_by_hop.c +++ b/vnet/vnet/ip/ip6_hop_by_hop.c @@ -24,6 +24,7 @@ #include #include +#include /* Timestamp precision multipliers for seconds, milliseconds, microseconds * and nanoseconds respectively. @@ -157,6 +158,21 @@ static u8 * format_ioam_data_list_element (u8 * s, va_list * args) return s; } +static u8 * format_ioam_pow (u8 * s, va_list * args) +{ + ioam_pow_option_t * pow0 = va_arg (*args, ioam_pow_option_t *); + u64 random, cumulative; + random = cumulative = 0; + if (pow0) + { + random = clib_net_to_host_u64 (pow0->random); + cumulative = clib_net_to_host_u64 (pow0->cumulative); + } + + s = format (s, "random = 0x%Lx, Cumulative = 0x%Lx, Index = 0x%x", + random, cumulative, pow0->reserved_profile_id); + return s; +} static u8 * format_ip6_hop_by_hop_trace (u8 * s, va_list * args) { @@ -170,6 +186,7 @@ static u8 * format_ip6_hop_by_hop_trace (u8 * s, va_list * args) u32 * elt0; int elt_index; u8 type0; + ioam_pow_option_t * pow0; hbh0 = (ip6_hop_by_hop_header_t *)t->option_data; @@ -211,6 +228,9 @@ static u8 * format_ip6_hop_by_hop_trace (u8 * s, va_list * args) case HBH_OPTION_TYPE_IOAM_PROOF_OF_WORK: s = format (s, " POW opt present\n"); + pow0 = (ioam_pow_option_t *) opt0; + s = format (s, " %U\n", format_ioam_pow,pow0); + opt0 = (ip6_hop_by_hop_option_t *) (((u8 *)opt0) + sizeof (ioam_pow_option_t)); break; @@ -234,6 +254,7 @@ vlib_node_registration_t ip6_hop_by_hop_node; #define foreach_ip6_hop_by_hop_error \ _(PROCESSED, "Pkts with ip6 hop-by-hop options") \ +_(PROFILE_MISS, "Pkts with ip6 hop-by-hop options but no profile set") \ _(UNKNOWN_OPTION, "Unknown ip6 hop-by-hop options") typedef enum { @@ -359,9 +380,12 @@ ip6_hop_by_hop_node_fn (vlib_main_t * vm, ip6_hop_by_hop_header_t *hbh0; ip6_hop_by_hop_option_t *opt0, *limit0; ioam_trace_option_t * trace0; + ioam_pow_option_t * pow0; u32 * elt0; u8 type0; - + u64 random = 0, cumulative = 0; + u8 pow_encap = 0; + /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; to_next[0] = bi0; @@ -439,6 +463,96 @@ ip6_hop_by_hop_node_fn (vlib_main_t * vm, break; case HBH_OPTION_TYPE_IOAM_PROOF_OF_WORK: + pow_profile = scv_profile_find(pow_profile_index); + if (PREDICT_FALSE(!pow_profile)) + { + vlib_node_increment_counter (vm, + ip6_hop_by_hop_node.index, + IP6_HOP_BY_HOP_ERROR_PROFILE_MISS, 1); + + opt0 = (ip6_hop_by_hop_option_t *) + (((u8 *)opt0) + sizeof (ioam_pow_option_t)); + break; + } + pow0 = 
(ioam_pow_option_t *) opt0; + pow_encap = (pow0->random == 0); + if (pow_encap) + { + if (PREDICT_FALSE(total_pkts_using_this_profile >= + pow_profile->validity)) + { + /* Choose a new profile */ + u16 new_profile_index; + new_profile_index = + scv_get_next_profile_id(vm, + pow_profile_index); + if (new_profile_index != pow_profile_index) + { + /* Got a new profile */ + scv_profile_invalidate(vm, hm, + pow_profile_index, + pow_encap); + pow_profile_index = new_profile_index; + pow_profile = + scv_profile_find(pow_profile_index); + total_pkts_using_this_profile = 0; + } + else + { + scv_profile_invalidate(vm, hm, + pow_profile_index, + pow_encap); + } + } + pow0->reserved_profile_id = + pow_profile_index & PROFILE_ID_MASK; + total_pkts_using_this_profile++; + } + else + { /* Non encap node */ + if (PREDICT_FALSE(pow0->reserved_profile_id != + pow_profile_index)) + { + /* New profile announced by encap node. */ + scv_profile *new_profile = 0; + new_profile = + scv_profile_find(pow0->reserved_profile_id); + if (PREDICT_FALSE(new_profile == 0 || + new_profile->validity == 0)) + { + /* Profile is invalid. Use old profile*/ + vlib_node_increment_counter (vm, + ip6_hop_by_hop_node.index, + IP6_HOP_BY_HOP_ERROR_PROFILE_MISS, 1); + scv_profile_invalidate(vm, hm, + pow0->reserved_profile_id, + pow_encap); + } + else + { + scv_profile_invalidate(vm, hm, + pow_profile_index, + pow_encap); + pow_profile_index = pow0->reserved_profile_id; + pow_profile = new_profile; + total_pkts_using_this_profile = 0; + } + } + total_pkts_using_this_profile++; + } + + if (pow0->random == 0) + { + pow0->random = clib_host_to_net_u64( + scv_generate_random(pow_profile)); + pow0->cumulative = 0; + } + random = clib_net_to_host_u64(pow0->random); + cumulative = clib_net_to_host_u64(pow0->cumulative); + pow0->cumulative = clib_host_to_net_u64( + scv_update_cumulative(pow_profile, + cumulative, + random)); opt0 = (ip6_hop_by_hop_option_t *) (((u8 *)opt0) + sizeof (ioam_pow_option_t)); break; @@ -757,7 +871,9 @@ vlib_node_registration_t ip6_pop_hop_by_hop_node; #define foreach_ip6_pop_hop_by_hop_error \ _(PROCESSED, "Pkts w/ removed ip6 hop-by-hop options") \ -_(NO_HOHO, "Pkts w/ no ip6 hop-by-hop options") +_(NO_HOHO, "Pkts w/ no ip6 hop-by-hop options") \ +_(SCV_PASSED, "Pkts with SCV in Policy") \ +_(SCV_FAILED, "Pkts with SCV out of Policy") typedef enum { #define _(sym,str) IP6_POP_HOP_BY_HOP_ERROR_##sym, @@ -772,6 +888,69 @@ static char * ip6_pop_hop_by_hop_error_strings[] = { #undef _ }; +static inline void ioam_end_of_path_validation (vlib_main_t * vm, + ip6_header_t *ip0, + ip6_hop_by_hop_header_t *hbh0) +{ + ip6_hop_by_hop_option_t *opt0, *limit0; + ioam_pow_option_t * pow0; + u8 type0; + u64 final_cumulative = 0; + u64 random = 0; + u8 result = 0; + + if (!hbh0 || !ip0) return; + + opt0 = (ip6_hop_by_hop_option_t *)(hbh0+1); + limit0 = (ip6_hop_by_hop_option_t *) + ((u8 *)hbh0 + ((hbh0->length+1)<<3)); + + /* Scan the set of h-b-h options, process ones that we understand */ + while (opt0 < limit0) + { + type0 = opt0->type & HBH_OPTION_TYPE_MASK; + switch (type0) + { + case HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE: + case HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST: + opt0 = (ip6_hop_by_hop_option_t *) + (((u8 *)opt0) + opt0->length + + sizeof (ip6_hop_by_hop_option_t)); + break; + case HBH_OPTION_TYPE_IOAM_PROOF_OF_WORK: + pow0 = (ioam_pow_option_t *) opt0; + random = clib_net_to_host_u64(pow0->random); + final_cumulative = clib_net_to_host_u64(pow0->cumulative); + result = scv_validate (pow_profile, + final_cumulative, 
random); + + if (result == 1) + { + vlib_node_increment_counter (vm, ip6_pop_hop_by_hop_node.index, + IP6_POP_HOP_BY_HOP_ERROR_SCV_PASSED, result); + } + else + { + vlib_node_increment_counter (vm, ip6_pop_hop_by_hop_node.index, + IP6_POP_HOP_BY_HOP_ERROR_SCV_FAILED, 1); + } + /* TODO: notify the scv failure*/ + opt0 = (ip6_hop_by_hop_option_t *) + (((u8 *)opt0) + sizeof (ioam_pow_option_t)); + break; + + case 0: /* Pad */ + opt0 = (ip6_hop_by_hop_option_t *) ((u8 *)opt0) + 1; + break; + + default: + format(0, "Something is wrong\n"); + break; + } + } +} + + static uword ip6_pop_hop_by_hop_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -910,14 +1089,14 @@ ip6_pop_hop_by_hop_node_fn (vlib_main_t * vm, next0 = IP6_HBYH_INPUT_NEXT_IP6_LOOKUP; goto out1; } - else - { /* Second pass */ - /* Collect data from trace via callback */ - next0 = ioam_end_of_path_cb ? - ioam_end_of_path_cb (vm, node, b0, ip0, adj0) : - IP6_HBYH_INPUT_NEXT_IP6_REWRITE; - } + /* Second pass */ + /* Collect data from trace via callback */ + next0 = ioam_end_of_path_cb ? + ioam_end_of_path_cb (vm, node, b0, ip0, adj0) : + IP6_HBYH_INPUT_NEXT_IP6_REWRITE; + /* TODO:Temporarily doing it here.. do this validation in end_of_path_cb */ + ioam_end_of_path_validation(vm, ip0, hbh0); /* Pop the trace data */ vlib_buffer_advance (b0, (hbh0->length+1)<<3); new_l0 = clib_net_to_host_u16 (ip0->payload_length) - @@ -1377,11 +1556,13 @@ VLIB_CLI_COMMAND (ip6_set_ioam_destination_cmd, static) = { .function = ip6_set_ioam_destination_command_fn, }; + void vnet_register_ioam_end_of_path_callback (void *cb) { ip6_hop_by_hop_main_t * hm = &ip6_hop_by_hop_main; hm->ioam_end_of_path_cb = cb; } + diff --git a/vnet/vnet/ip/ip6_hop_by_hop.h b/vnet/vnet/ip/ip6_hop_by_hop.h index 2ec83f2be35..f9bec13ed58 100644 --- a/vnet/vnet/ip/ip6_hop_by_hop.h +++ b/vnet/vnet/ip/ip6_hop_by_hop.h @@ -16,6 +16,7 @@ #define __included_ip6_hop_by_hop_h__ #include +#include typedef struct { /* The current rewrite we're using */ diff --git a/vnet/vnet/ip/ip6_hop_by_hop_packet.h b/vnet/vnet/ip/ip6_hop_by_hop_packet.h index 22ca2055261..57433acb72e 100644 --- a/vnet/vnet/ip/ip6_hop_by_hop_packet.h +++ b/vnet/vnet/ip/ip6_hop_by_hop_packet.h @@ -39,9 +39,9 @@ typedef struct { } ip6_hop_by_hop_option_t; /* $$$$ IANA banana constants */ -#define HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST 1 -#define HBH_OPTION_TYPE_IOAM_PROOF_OF_WORK 2 -#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE 3 +#define HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST 27 +#define HBH_OPTION_TYPE_IOAM_PROOF_OF_WORK 28 +#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE 29 /* typedef struct { diff --git a/vnet/vnet/lib-scv/math64.h b/vnet/vnet/lib-scv/math64.h new file mode 100644 index 00000000000..9ee6e438f90 --- /dev/null +++ b/vnet/vnet/lib-scv/math64.h @@ -0,0 +1,159 @@ +/* + * math64.h provides the 64 bit unsigned integer add, multiply followed by modulo operation + * The linux/math64.h provides divide and multiply 64 bit integers but: + * 1. multiply: mul_u64_u64_shr - only returns 64 bits of the result and has to be called + * twice to get the complete 128 bits of the result. + * 2. Modulo operation of the result of addition and multiplication of u64 that may result + * in integers > 64 bits is not supported + * Hence this header to combine add/multiply followed by modulo of u64 integrers + * always resulting in u64. + * + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef include_vnet_math64_h +#define include_vnet_math64_h +#include + +/* + * multiplies and returns result in hi and lo + */ +static inline void mul64by64(u64 a, u64 b, u64 * hi, u64 * lo) +{ + u64 a_lo = (u64) (uint32_t) a; + u64 a_hi = a >> 32; + u64 b_lo = (u64) (u32) b; + u64 b_hi = b >> 32; + + u64 p0 = a_lo * b_lo; + u64 p1 = a_lo * b_hi; + u64 p2 = a_hi * b_lo; + u64 p3 = a_hi * b_hi; + + u32 cy = (u32) (((p0 >> 32) + (u32) p1 + (u32) p2) >> 32); + + *lo = p0 + (p1 << 32) + (p2 << 32); + *hi = p3 + (p1 >> 32) + (p2 >> 32) + cy; + return; +} + +#define TWO64 18446744073709551616.0 + +static inline u64 mod128by64(u64 x, u64 y, u64 m, double di) +{ + u64 q1, q2, q; + u64 p1, p0; + double dq; + + /* calculate quotient first pass 53 bits */ + dq = (TWO64 * (double)x + (double)y) * di; + + if (dq >= TWO64) + q1 = 0xfffffffffffff800L; + else + q1 = dq; + + /* q1 * m to compare the product to the dividend. */ + mul64by64(q1, m, &p1, &p0); + + /* Adjust quotient. is it > actual result: */ + if (x < p1 || (x == p1 && y < p0)) + { + /* q1 > quotient. calculate abs remainder */ + x = p1 - (x + (p0 < y)); + y = p0 - y; + + /* use the remainder as new dividend to adjust quotient */ + q2 = (u64) ((TWO64 * (double)x + (double)y) * di); + mul64by64(q2, m, &p1, &p0); + + q = q1 - q2; + if (x < p1 || (x == p1 && y <= p0)) + { + y = p0 - y; + } + else + { + y = p0 - y; + y += m; + q--; + } + } + else + { + x = x - (p1 + (y < p0)); + y = y - p0; + + q2 = (u64) ((TWO64 * (double)x + (double)y) * di); + mul64by64(q2, m, &p1, &p0); + + q = q1 + q2; + if (x < p1 || (x == p1 && y < p0)) + { + y = y - p0; + y += m; + q--; + } + else + { + y = y - p0; + if (y >= m) + { + y -= m; + q++; + } + } + } + + return y; +} + +/* + * returns a % p + */ +static inline u64 mod64by64(u64 a, u64 p, u64 primeinv) +{ + return (mod128by64(0, a, p, primeinv)); +} + +static inline void add64(u64 a, u64 b, u64 * whi, u64 * wlo) +{ + *wlo = a + b; + if (*wlo < a) + *whi = 1; + +} + +/* + * returns (a + b)%p + */ +static inline u64 add64_mod(u64 a, u64 b, u64 p, double pi) +{ + u64 shi = 0, slo = 0; + + add64(a, b, &shi, &slo); + return (mod128by64(shi, slo, p, pi)); +} + +/* + * returns (ab) % p + */ +static inline u64 mul64_mod(u64 a, u64 b, u64 p, double pi) +{ + u64 phi = 0, plo = 0; + + mul64by64(a, b, &phi, &plo); + return (mod128by64(phi, plo, p, pi)); +} + +#endif diff --git a/vnet/vnet/lib-scv/scv_util.c b/vnet/vnet/lib-scv/scv_util.c new file mode 100644 index 00000000000..e69c8837815 --- /dev/null +++ b/vnet/vnet/lib-scv/scv_util.c @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include "math64.h" +#include "scv_util.h" + +scv_profile *pow_profile = NULL; +u16 pow_profile_index = 0; +u64 total_pkts_using_this_profile = 0; +u8 chain_path_name[PATH_NAME_SIZE]; +scv_profile profile_list[MAX_SERVICE_PROFILES]; +u8 max_profiles = 0; +u16 invalid_profile_start_index = 0; +u8 number_of_invalid_profiles = 0; +f64 next_time_to_send = 0; +u32 time_exponent = 1; +vlib_main_t *gvm = 0; + +static void scv_profile_init(scv_profile * new, u16 id) +{ + if (new) + { + memset(new, 0, sizeof(scv_profile)); + new->id = id; + } +} + +/* + * Get maximum number of profiles configured for this chain. + */ +u8 scv_get_max_profiles(void) +{ + return max_profiles; +} + +scv_profile *scv_profile_find(u16 id) +{ + u8 max = scv_get_max_profiles(); + + if (id >= 0 && id < max) + { + return (&profile_list[id]); + } + return (NULL); +} + +u8 sc_init_done = 0; +void scv_init(u8 * path_name, u8 max, u8 indx) +{ + int i = 0; + + if (sc_init_done) + { + return; + } + memcpy(chain_path_name, path_name, strlen((const char *)path_name) + 1); + max_profiles = max; + pow_profile_index = indx; + + for (i = 0; i < max_profiles; i++) + { + scv_profile_init(&profile_list[i], i); + } + + sc_init_done = 1; +} + +void scv_profile_cleanup(scv_profile * profile) +{ + u16 id = profile->id; + + memset(profile, 0, sizeof(scv_profile)); + profile->id = id; /* Restore id alone */ +} + +void scv_profile_create(scv_profile * profile, u64 prime, + u64 poly2, u64 lpc, u64 secret_share, u64 validity) +{ + if (profile) + { + scv_profile_cleanup(profile); + profile->prime = prime; + profile->primeinv = 1.0 / prime; + profile->lpc = lpc; + profile->poly_pre_eval = poly2; + profile->secret_share = secret_share; + profile->validity = validity; + time_exponent = 1; /* Got a new profile. 
Reset backoff */ + next_time_to_send = 0; /* and send next request with no delay */ + } +} + +void scv_set_validator(scv_profile * profile, u64 key) +{ + if (profile) + { + profile->validator = 1; + profile->secret_key = key; + } +} + +static inline u64 sc_update_cumulative(u64 cumulative, u64 random, + u64 secret_share, u64 prime, u64 lpc, u64 pre_split, double prime_inv) +{ + u64 share_random = 0; + u64 cumulative_new = 0; + + /* + * calculate split share for random + */ + share_random = add64_mod(pre_split, random, prime, prime_inv); + + /* + * lpc * (share_secret + share_random) + */ + share_random = add64_mod(share_random, secret_share, prime, prime_inv); + share_random = mul64_mod(share_random, lpc, prime, prime_inv); + + cumulative_new = add64_mod(cumulative, share_random, prime, prime_inv); + + return (cumulative_new); +} + +u64 scv_update_cumulative(scv_profile * profile, u64 cumulative, u64 random) +{ + if (profile && profile->validity != 0) + { + return (sc_update_cumulative(cumulative, random, profile->secret_share, + profile->prime, profile->lpc, profile->poly_pre_eval, + profile->primeinv)); + } + return (0); +} + +static u8 sc_validate(u64 secret, u64 prime, double prime_inv, + u64 cumulative, u64 random) +{ + if (cumulative == (random + secret)) + { + return (1); + } + else if (cumulative == add64_mod(random, secret, prime, prime_inv)) + { + return (1); + } + return (0); +} + +/* + * return True if the cumulative matches secret from a profile + */ +u8 scv_validate(scv_profile * profile, u64 cumulative, u64 random) +{ + if (profile && profile->validator) + { + return (sc_validate(profile->secret_key, profile->prime, + profile->primeinv, cumulative, random)); + } + return (0); +} + +/* + * Utility function to get random number per pack + */ +u64 scv_generate_random(scv_profile * profile) +{ + u64 random = 0; + int32_t second_half; + static u32 seed = 0; + + if (PREDICT_FALSE(!seed)) + seed = random_default_seed(); + + /* + * Upper 4 bytes seconds + */ + random = (u64) time(NULL); + + random &= 0xffffffff; + random = random << 32; + /* + * Lower 4 bytes random number + */ + second_half = random_u32(&seed); + + random |= second_half; + + if (PREDICT_TRUE(profile != NULL)) + { + random &= profile->bit_mask; + } + return (random); +} + +void scv_profile_set_bit_mask(scv_profile * profile, u16 bits) +{ + int sizeInBits; + + if (profile) + { + sizeInBits = sizeof(profile->bit_mask) * 8; + profile->bit_mask = + (bits >= + sizeInBits ? 
(u64) - 1 : (u64) ((u64) 1 << (u64) bits) - 1); + } +} + +/* + * TODO: Use vector buffers and hash tables + */ +#define MAX_SERVICES 16 + +clib_error_t *clear_scv_profile_command_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + int i = 0; + + if (!sc_init_done) + return 0; + + for (i = 0; i < max_profiles; i++) + { + scv_profile_cleanup(&profile_list[i]); + } + pow_profile = NULL; + pow_profile_index = 0; + total_pkts_using_this_profile = 0; + memset(chain_path_name, 0, PATH_NAME_SIZE); + max_profiles = 0; + invalid_profile_start_index = 0; + number_of_invalid_profiles = 0; + next_time_to_send = 0; + time_exponent = 1; + sc_init_done = 0; + + return 0; +} + +void clear_scv_profiles() +{ + clear_scv_profile_command_fn(0, 0, 0); +} + +VLIB_CLI_COMMAND(clear_scv_profile_command) = +{ +.path = "clear scv profile", +.short_help = "clear scv profile [|all]", +.function = clear_scv_profile_command_fn, +}; + +static clib_error_t *set_scv_profile_command_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + u64 prime; + u64 secret_share, validity; + u64 secret_key; + u8 validator = 0; + u16 profile_id; + u32 bits; + u64 lpc = 0, poly2 = 0; + scv_profile *profile = NULL; + + bits = MAX_BITS; + + while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT) + { + if (unformat(input, "id %d", &profile_id)) + ; + else if (unformat(input, "validate-key 0x%Lx", &secret_key)) + validator = 1; + else if (unformat(input, "prime-number 0x%Lx", &prime)) + ; + else if (unformat(input, "secret_share 0x%Lx", &secret_share)) + ; + else if (unformat(input, "polynomial2 0x%Lx", &poly2)) + ; + else if (unformat(input, "lpc 0x%Lx", &lpc)) + ; + else if (unformat(input, "validity 0x%Lx", &validity)) + ; + else if (unformat(input, "bits-in-random %d", &bits)) + { + if (bits > MAX_BITS) + bits = MAX_BITS; + } + else + return clib_error_return(0, "unknown input `%U'", + format_unformat_error, input); + } + + scv_init((u8 *) "TEST", MAX_SERVICE_PROFILES, 0 /* start index */ ); + profile = scv_profile_find(profile_id); + + if (profile) + { + scv_profile_create(profile, prime, poly2, lpc, secret_share, validity); + if (validator) + scv_set_validator(profile, secret_key); + scv_profile_set_bit_mask(profile, bits); + } + + return 0; +} + +VLIB_CLI_COMMAND(set_scv_profile_command) = +{ +.path = "set scv profile", +.short_help = "set scv profile id [0-16] [validator-key 0xu64] \ + prime-number 0xu64 secret_share 0xu64 lpc 0xu64 \ + polynomial2 0xu64 bits-in-random [0-64] ", +.function = set_scv_profile_command_fn, +}; + +static clib_error_t *show_scv_profile_command_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + scv_profile *p = NULL; + u16 i; + u8 *s = 0; + + if (sc_init_done == 0) + { + s = format(s, "SCV Profiles not configured\n"); + vlib_cli_output(vm, "%v", s); + return 0; + } + + for (i = 0; i < max_profiles; i++) + { + p = scv_profile_find(i); + if (p->validity == 0) + continue; + s = format(s, "SCV Profile at index: %d\n", i); + s = format(s, " Id : %d\n", p->id); + s = format(s, " Validator : %s (%d)\n", + (p->validator) ? 
"True" : "False", p->validator); + if (p->validator == 1) + s = format(s, " Secret key : 0x%Lx (%Ld)\n", + p->secret_key, p->secret_key); + s = format(s, " Secret share : 0x%Lx (%Ld)\n", + p->secret_share, p->secret_share); + s = format(s, " Prime number : 0x%Lx (%Ld)\n", + p->prime, p->prime); + s = format(s, "2nd polynomial(eval) : 0x%Lx (%Ld)\n", + p->poly_pre_eval, p->poly_pre_eval); + s = format(s, " LPC : 0x%Lx (%Ld)\n", p->lpc, p->lpc); + + s = format(s, " Bit mask : 0x%Lx (%Ld)\n", + p->bit_mask, p->bit_mask); + s = format(s, " Validity : 0x%Lx (%Ld)\n", + p->validity, p->validity); + } + + if (max_profiles) + { + p = scv_profile_find(pow_profile_index); + + s = format(s, "\nInvalid profiles start : %d Number : %d\n", + invalid_profile_start_index, number_of_invalid_profiles); + + if (next_time_to_send) + s = format(s, "\nNext time to send : %U, time_exponent:%ld\n", + format_time_interval, "d:h:m:s:f:u", + next_time_to_send, time_exponent); + else + s = format(s, "\nNext time to send : Immediate\n"); + s = format(s, "\nPath name : %s\n", chain_path_name); + s = format(s, "\nProfile index in use: %d\n", pow_profile_index); + s = format(s, "Pkts passed : 0x%Lx (validity:0x%Lx)\n", + total_pkts_using_this_profile, p->validity); + if (scv_is_decap(p)) + s = format(s, " This is Decap node. \n"); + vlib_cli_output(vm, "%v", s); + } + vec_free(s); + + return 0; +} + +VLIB_CLI_COMMAND(show_scv_profile_command) = +{ +.path = "show scv profile", +.short_help = "show scv profile", +.function = show_scv_profile_command_fn, +}; + +static clib_error_t *test_profile_renew_refresh_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + u8 renew_or_refresh = 0; + +#define TEST_PROFILE_RENEW 1 +#define TEST_PROFILE_REFRESH 2 + u8 *path_name = 0; + u32 start_index = 0, num_profiles = 0; + int rc = 0; + + while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT) + { + if (unformat(input, "path-name %s start-index %d num-profiles %d", + &path_name, &start_index, &num_profiles)) + ; + else if (unformat(input, "renew")) + renew_or_refresh = TEST_PROFILE_RENEW; + else if (unformat(input, "refresh")) + renew_or_refresh = TEST_PROFILE_REFRESH; + else + break; + } + + if (renew_or_refresh == TEST_PROFILE_RENEW) + { + + rc = scv_profile_renew(path_name, (u8) start_index, (u8) num_profiles); + } + else if (renew_or_refresh == TEST_PROFILE_REFRESH) + { + + rc = scv_profile_refresh(path_name, (u8) start_index, + (u8) num_profiles); + } + else + { + vec_free(path_name); + return clib_error_return(0, "Enter renew or refresh"); + } + + vlib_cli_output(vm, "%s notification %s. rc = %d\n", + (renew_or_refresh == TEST_PROFILE_RENEW) ? "Renew" : "Refresh", + (rc != 0) ? 
"failed" : "sent", (u32) rc); + + vec_free(path_name); + + return 0; +} + +VLIB_CLI_COMMAND(test_ioam_profile_renew_refresh_cmd, static) = +{ +.path = "test ioam profile-notification ", +.short_help = + "test ioam profile-notification path-name start-index num-profiles ", +.function = test_profile_renew_refresh_fn, +}; + +static clib_error_t *set_scv_init_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + u8 *path_name = 0; + u32 start_index = 0, num_profiles = 0; + + while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT) + { + if (unformat(input, "path-name %s start-index %d num-profiles %d", + &path_name, &start_index, &num_profiles)) + scv_init(path_name, num_profiles, start_index); + else + return clib_error_return(0, "unknown input `%U'", + format_unformat_error, input); + } + vec_free(path_name); + return 0; +} + +VLIB_CLI_COMMAND(set_ioam_sc_init_command, static) = +{ +.path = "set scv-init ", +.short_help = + "set scv-init path-name start-index num-profiles ", +.function = set_scv_init_fn, +}; diff --git a/vnet/vnet/lib-scv/scv_util.h b/vnet/vnet/lib-scv/scv_util.h new file mode 100644 index 00000000000..fc0c73f47b1 --- /dev/null +++ b/vnet/vnet/lib-scv/scv_util.h @@ -0,0 +1,278 @@ +/* + * scv_util.h -- Service chain validation/Proof Of Transit Utility Header + * + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef include_vnet_scv_util_h +#define include_vnet_scv_util_h + +#include +#define MAXDEGREE 1024 +#define MAXTOKENLEN 128 +#define debug_ioam debug_ioam_fn +#define MAX_SERVICE_NODES 10 +/* Dont change this size 256. This is there across multiple components */ +#define PATH_NAME_SIZE 256 + +/* Ring size. this should be same as the one in ODL. Do not change this + without change in ODL. 
*/ +#define MAX_SERVICE_PROFILES 16 + +/** + * Usage: + * + * On any [service] node that participates in Service / Path verfication: + * + * Step 1: Initialize this library by calling scv_init() + * Step 2: Setup a Service chain validation profile that contains all the parameters needed to compute cumulative: + * Call these functions: + * scv_profile_find + * scv_profile_create + * scv_profile_set_bit_mask - To setup how large we want the numbers used in the computation and random number <= 64 bits + * Step 2a: For validator do this: + * scv_set_validator + * Step 3a: At the initial Service node to generate Random number that will be read by all other nodes: + * scv_generate_random + * Step 3b: At all service nodes including initial and verifier call this to compute cumulative: + * scv_update_cumulative + * Step 4: At the verifier: + * scv_validate + * + */ + +typedef struct scv_profile_ +{ + u16 id; + u64 random; + u8 validator; + u64 secret_key; + u64 secret_share; + u64 prime; + u64 lpc; + u64 poly_pre_eval; + u64 bit_mask; + u64 limit; + u64 validity; + double primeinv; + // struct hlist_node my_hash_list; when this gets added to hashtbale +} scv_profile; + +extern scv_profile *pow_profile; +extern u16 pow_profile_index; +extern u64 total_pkts_using_this_profile; +extern u8 chain_path_name[PATH_NAME_SIZE]; +extern u16 invalid_profile_start_index; +extern u8 number_of_invalid_profiles; +extern f64 next_time_to_send; +extern u32 time_exponent; + +/* + * Initialize Service chain + */ +void scv_init(u8 * path_name, u8 max, u8 indx); + +/* + * Get maximum number of profiles configured for this chain. + */ +u8 scv_get_max_profiles(void); + +/* + * Find a SC profile by ID + */ +scv_profile *scv_profile_find(u16 id); + +static inline u16 scv_profile_get_id(scv_profile * profile) +{ + if (profile) + { + return (profile->id); + } + return (0); +} + +/* setup and clean up profile */ +void scv_profile_create(scv_profile * profile, u64 prime, + u64 poly2, u64 lpc, u64 secret_share, u64 validity); +/* + * Setup profile as a validator + */ +void scv_set_validator(scv_profile * profile, u64 key); +void scv_profile_cleanup(scv_profile * profile); + +/* + * Setup max bits to be used for random number generation + */ +#define MAX_BITS 64 +void scv_profile_set_bit_mask(scv_profile * profile, u16 bits); + +/* + * Given a random and cumulative compute the new cumulative for a given profile + */ +u64 scv_update_cumulative(scv_profile * profile, u64 cumulative, u64 random); + +/* + * return True if the cumulative matches secret from a profile + */ +u8 scv_validate(scv_profile * profile, u64 cumulative, u64 random); + +/* + * Utility function to get random number per pack + */ +u64 scv_generate_random(scv_profile * profile); + +int scv_profile_to_str(scv_profile * profile, char *buf, int n); + +extern void clear_ioam_scv_profiles(); + +static inline u8 scv_get_profile_in_use(void) +{ + return pow_profile_index; +} + +static inline + void scv_notification_reset(u16 start_index_recvd, u8 num_profiles_recvd) +{ + /* Profiles recevied w/o notn. Nothing to do. */ + if (number_of_invalid_profiles == 0) + return; + + /* Most likely case. 
Got all requested profiles */ + if (PREDICT_TRUE(num_profiles_recvd == number_of_invalid_profiles && + start_index_recvd == invalid_profile_start_index)) + { + number_of_invalid_profiles = 0; + invalid_profile_start_index = 0; + return; + } + + /* Received partial list */ + if (num_profiles_recvd < number_of_invalid_profiles) + { + ASSERT(start_index_recvd == invalid_profile_start_index); + invalid_profile_start_index = (start_index_recvd + num_profiles_recvd) + % scv_get_max_profiles(); + number_of_invalid_profiles -= num_profiles_recvd; + } + + return; +} + +int __attribute__ ((weak)) scv_profile_renew(u8 * path_name, + u8 start_index, u8 num_profiles); +int __attribute__ ((weak)) scv_profile_refresh(u8 * path_name, + u8 start_index, u8 num_profiles); + +static inline u8 scv_is_decap(scv_profile * p) +{ + return (p->validator == 1); +} + +static inline u16 scv_get_next_profile_id(vlib_main_t * vm, u16 id) +{ + int next_id, num_profiles = 0; + scv_profile *p; + u8 max; + + max = scv_get_max_profiles(); + + next_id = id; + + /* Check for new profile in the ring buffer until a valid one. Exclude + checking for the one already in use. */ + for (num_profiles = 0; num_profiles < max - 1; num_profiles++) + { + next_id = (next_id + 1) % max; + p = scv_profile_find(next_id); + if (p->validity != 0) + { + vlib_cli_output(vm, "Current id: %d, New id: %d\n", id, next_id); + return (next_id); + } + } + + return (id); +} + +static inline void +scv_profile_invalidate(vlib_main_t * vm, ip6_hop_by_hop_main_t * hm, + u16 id, u8 is_encap) +{ + scv_profile *p = scv_profile_find(id); + int rc; + u8 max; + f64 now = 0; + + p->validity = 0; + + /* If there are alredy profiles waiting. If so, use existing start_index. + */ + if (!number_of_invalid_profiles) + invalid_profile_start_index = id; + + max = scv_get_max_profiles(); + + /* Check whether the id is already included in existing list */ + if (!(id >= invalid_profile_start_index && + id <= (invalid_profile_start_index + + number_of_invalid_profiles - 1) % max)) + { + number_of_invalid_profiles++; + } + + if (number_of_invalid_profiles > scv_get_max_profiles()) + number_of_invalid_profiles = scv_get_max_profiles(); + + now = (f64) (((f64) hm->unix_time_0) + + (vlib_time_now(hm->vlib_main) - hm->vlib_time_0)); + if (now <= next_time_to_send) + return; + + if (is_encap) + { + rc = scv_profile_renew(chain_path_name, + (u8) invalid_profile_start_index, number_of_invalid_profiles); + if (rc != 0) + vlib_cli_output(vm, + "Renew notification- id start:%d, num %d failed. rc: %d\n", + invalid_profile_start_index, number_of_invalid_profiles, rc); + else + vlib_cli_output(vm, + "Renew notification- id start:%d num %d sent. \n", + invalid_profile_start_index, number_of_invalid_profiles); + + } + else + { + /* Non encap node. Send refresh notification for now. Later set a + timer and if there is no profile even after the timeout send + refresh notification. */ + rc = scv_profile_refresh(chain_path_name, + (u8) invalid_profile_start_index, number_of_invalid_profiles); + if (rc != 0) + vlib_cli_output(vm, + "Refresh notification- id start:%d, num %d failed. rc: %d\n", + invalid_profile_start_index, number_of_invalid_profiles, rc); + else + vlib_cli_output(vm, + "Refresh notification- id start:%d num %d sent. \n", + invalid_profile_start_index, number_of_invalid_profiles); + } + next_time_to_send = now + time_exponent; + time_exponent <<= 1; /* backoff time is power of 2 seconds */ + + return; +} + +#endif
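
The per-hop arithmetic added above is Shamir's secret sharing evaluated incrementally: each service node i holds a share POLY-1(x_i) of a secret polynomial (secret_share), the non-constant part of a second polynomial evaluated at x_i (poly_pre_eval, to which the per-packet random is added as the constant term), and the Lagrange basis coefficient of x_i at x = 0 (lpc). Every hop folds lpc_i * (poly_pre_eval_i + random + secret_share_i) into the cumulative, so after the last hop the cumulative interpolates to (random + secret_key) mod prime, which is exactly the check scv_validate() performs at the decap node. Below is a minimal standalone sketch of that arithmetic, mirroring sc_update_cumulative() and sc_validate(); the prime, shares, LPCs and the two node coordinates are made-up illustration values, not anything shipped with this patch.

    /*
     * Standalone sketch of the split-share update/validation, using
     * small made-up parameters so plain 64-bit arithmetic suffices.
     */
    #include <stdio.h>
    #include <stdint.h>

    /* Toy stand-ins for add64_mod()/mul64_mod(); the real helpers in
     * math64.h reduce 128-bit intermediate results modulo the prime. */
    static uint64_t add_mod(uint64_t a, uint64_t b, uint64_t p) { return (a + b) % p; }
    static uint64_t mul_mod(uint64_t a, uint64_t b, uint64_t p) { return (a * b) % p; }

    typedef struct {
        uint64_t secret_share;  /* POLY-1 (secret polynomial) evaluated at this node  */
        uint64_t poly_pre_eval; /* POLY-2 at this node, without its constant (random) */
        uint64_t lpc;           /* Lagrange basis coefficient of this node at x = 0   */
    } node_profile;

    int main(void)
    {
        const uint64_t prime  = 7919; /* example prime                                */
        const uint64_t secret = 1234; /* validator's secret_key, POLY-1 constant term */
        const uint64_t rnd    = 55;   /* per-packet random, POLY-2 constant term      */

        /* Two-node chain at x = 1 and x = 2; POLY-1 = 1234 + 3x, POLY-2 = rnd + 5x. */
        node_profile nodes[2] = {
            { 1237,  5, 2    },       /* x = 1: lpc = (0-2)/(1-2) = 2                 */
            { 1240, 10, 7918 },       /* x = 2: lpc = (0-1)/(2-1) = -1 = prime - 1    */
        };

        /* Each hop folds lpc * (poly_pre_eval + rnd + secret_share) into the
         * cumulative, exactly as sc_update_cumulative() does. */
        uint64_t cumulative = 0;
        for (int i = 0; i < 2; i++)
        {
            uint64_t share2 = add_mod(nodes[i].poly_pre_eval, rnd, prime);
            uint64_t sum    = add_mod(share2, nodes[i].secret_share, prime);
            cumulative      = add_mod(cumulative, mul_mod(sum, nodes[i].lpc, prime), prime);
        }

        /* Lagrange interpolation at 0 reconstructs (rnd + secret) mod prime,
         * which is what sc_validate() compares against at the decap node. */
        printf("cumulative = %llu, expected = %llu\n",
               (unsigned long long) cumulative,
               (unsigned long long) add_mod(rnd, secret, prime));
        return 0;
    }

Built with any C compiler this prints 1289 for both the cumulative and the expected value; the in-tree code performs the same steps with add64_mod()/mul64_mod() from math64.h so that intermediate sums and products larger than 64 bits are reduced correctly.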
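
For completeness, one possible way to provision and inspect profiles by hand through the new debug CLI, reusing the toy numbers from the standalone sketch (0x1eef = 7919, 0x4d2 = 1234, and so on); a real deployment would take these values from whatever controller generated the Shamir polynomials for the chain, and each node in the chain gets its own secret_share, polynomial2 and lpc. Note that the option parser accepts "validate-key", although the short help string advertises "validator-key".

    On a transit/encap node:
        set scv-init path-name TestChain start-index 0 num-profiles 16
        set scv profile id 0 prime-number 0x1eef secret_share 0x4d5 polynomial2 0x5 lpc 0x2 validity 0xffff bits-in-random 12

    On the verifier (decap) node, the same profile id plus the validation key:
        set scv-init path-name TestChain start-index 0 num-profiles 16
        set scv profile id 0 validate-key 0x4d2 prime-number 0x1eef secret_share 0x4d8 polynomial2 0xa lpc 0x1eee validity 0xffff bits-in-random 12

    show scv profile
    clear scv profile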