X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FPLRsearch%2FPLRsearch.py;h=326aa2e2d28d6703d8f41e2f674c799f69923cb3;hb=ac8abea443a775c71e257bf18c38786dbd8f33d8;hp=b7c93443913ae2756f07c96536bccbb3d48083b9;hpb=fbbc47359e3f7b59bbd5a84d85c673374933a50a;p=csit.git

diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py
index b7c9344391..326aa2e2d2 100644
--- a/resources/libraries/python/PLRsearch/PLRsearch.py
+++ b/resources/libraries/python/PLRsearch/PLRsearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,20 +17,22 @@ import logging
 import math
 import multiprocessing
 import time
+
 from collections import namedtuple

 import dill
+
 from scipy.special import erfcx, erfc

 # TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH,
 # then switch to absolute imports within PLRsearch package.
 # Current usage of relative imports is just a short term workaround.
 from . import Integrator
-from .log_plus import log_plus, log_minus
 from . import stat_trackers
+from .log_plus import log_plus, log_minus


-class PLRsearch(object):
+class PLRsearch:
     """A class to encapsulate data relevant for the search method.

     The context is performance testing of packet processing systems.
@@ -41,7 +43,7 @@ class PLRsearch(object):

     Two constants are stored as class fields for speed.

-    Method othed than search (and than __init__)
+    Methods other than search (and __init__)
     are just internal code structure.

     TODO: Those method names should start with underscore then.
@@ -51,10 +53,18 @@ class PLRsearch(object):
     log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10)))

     def __init__(
-            self, measurer, trial_duration_per_trial, packet_loss_ratio_target,
-            trial_number_offset=0, timeout=1800.0, trace_enabled=False):
+        self,
+        measurer,
+        trial_duration_per_trial,
+        packet_loss_ratio_target,
+        trial_number_offset=0,
+        timeout=7200.0,
+        trace_enabled=False,
+    ):
         """Store rate measurer and additional parameters.

+        The measurer must never report a negative loss count.
+
         TODO: Copy AbstractMeasurer from MLRsearch.

         :param measurer: The measurer to call when searching.
@@ -168,53 +178,77 @@ class PLRsearch(object):
         stop_time = time.time() + self.timeout
         min_rate = float(min_rate)
         max_rate = float(max_rate)
-        logging.info("Started search with min_rate %(min)r, max_rate %(max)r",
-                     {"min": min_rate, "max": max_rate})
-        trial_result_list = list()
+        logging.info(
+            f"Started search with min_rate {min_rate!r}, "
+            f"max_rate {max_rate!r}"
+        )
+        trial_result_list = []
         trial_number = self.trial_number_offset
         focus_trackers = (None, None)
         transmit_rate = (min_rate + max_rate) / 2.0
         lossy_loads = [max_rate]
-        zeros = 0  # How many cosecutive zero loss results are happening.
+        zeros = 0  # How many consecutive zero loss results are happening.
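# ---- Editor's aside (illustrative sketch, not part of the diff) ----
# A minimal usage sketch for the class above. The FakeMeasurer below is
# hypothetical: it only mimics the attributes this file reads from a
# measurement result (search() later assigns plr_loss_count onto it,
# which SimpleNamespace allows).
from types import SimpleNamespace

class FakeMeasurer:
    """Hypothetical stand-in for the AbstractMeasurer mentioned in the TODO."""

    def measure(self, duration, load):
        count = int(duration * load)
        lost = int(count * 1e-4)  # a constant tiny loss; never negative
        ratio = (lost / count) if count else 0.0
        return SimpleNamespace(
            intended_load=load,
            intended_duration=duration,
            intended_count=count,
            offered_count=count,
            loss_ratio=ratio,
            relative_forwarding_rate=load * (1.0 - ratio),
        )

# Hedged invocation sketch (argument names mirror the diff, values made up):
# searcher = PLRsearch(
#     measurer=FakeMeasurer(),
#     trial_duration_per_trial=5.0,
#     packet_loss_ratio_target=1e-7,
# )
# average, stdev = searcher.search(min_rate=1e4, max_rate=1e7)
# ---- End of editor's aside. ----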
         while 1:
             trial_number += 1
-            logging.info("Trial %(number)r", {"number": trial_number})
+            logging.info(f"Trial {trial_number!r}")
             results = self.measure_and_compute(
-                self.trial_duration_per_trial * trial_number, transmit_rate,
-                trial_result_list, min_rate, max_rate, focus_trackers)
+                self.trial_duration_per_trial * trial_number,
+                transmit_rate,
+                trial_result_list,
+                min_rate,
+                max_rate,
+                focus_trackers,
+            )
             measurement, average, stdev, avg1, avg2, focus_trackers = results
+            # Workaround for unsent packets and other anomalies.
+            measurement.plr_loss_count = min(
+                measurement.intended_count,
+                int(measurement.intended_count * measurement.loss_ratio + 0.9),
+            )
+            logging.debug(
+                f"loss ratio {measurement.plr_loss_count}"
+                f" / {measurement.intended_count}"
+            )
             zeros += 1
             # TODO: Ratio of fill rate to drain rate seems to have
             # exponential impact. Make it configurable, or is 4:3 good enough?
-            if measurement.loss_fraction >= self.packet_loss_ratio_target:
+            if measurement.plr_loss_count >= (
+                measurement.intended_count * self.packet_loss_ratio_target
+            ):
                 for _ in range(4 * zeros):
-                    lossy_loads.append(measurement.target_tr)
-                if measurement.loss_count > 0:
+                    lossy_loads.append(measurement.intended_load)
+                lossy_loads.sort()
                 zeros = 0
-                lossy_loads.sort()
+                logging.debug("High enough loss, lossy loads added.")
+            else:
+                logging.debug(
+                    f"Not a high loss, zero counter bumped to {zeros}."
+                )
             if stop_time <= time.time():
                 return average, stdev
             trial_result_list.append(measurement)
             if (trial_number - self.trial_number_offset) <= 1:
                 next_load = max_rate
             elif (trial_number - self.trial_number_offset) <= 3:
-                next_load = (measurement.receive_rate / (
-                    1.0 - self.packet_loss_ratio_target))
+                next_load = measurement.relative_forwarding_rate / (
+                    1.0 - self.packet_loss_ratio_target
+                )
             else:
                 next_load = (avg1 + avg2) / 2.0
                 if zeros > 0:
                     if lossy_loads[0] > next_load:
                         diminisher = math.pow(2.0, 1 - zeros)
                         next_load = lossy_loads[0] + diminisher * next_load
-                        next_load /= (1.0 + diminisher)
+                        next_load /= 1.0 + diminisher
                     # On zero measurement, we need to drain obsoleted low losses
                     # even if we did not use them to increase next_load,
                     # in order to get to usable losses at higher loads.
                     if len(lossy_loads) > 3:
                         lossy_loads = lossy_loads[3:]
-            logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r",
-                          {"z": zeros, "o": (avg1 + avg2) / 2.0,
-                           "n": next_load, "s": lossy_loads})
+            logging.debug(
+                f"Zeros {zeros!r} orig {(avg1 + avg2) / 2.0!r} "
+                f"next {next_load!r} loads {lossy_loads!r}"
+            )
             transmit_rate = min(max_rate, max(min_rate, next_load))

     @staticmethod
@@ -262,7 +296,8 @@ class PLRsearch(object):
             trace("chi0", chi0)
             if chi > 0:
                 log_lps = math.log(
-                    load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread)
+                    load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread
+                )
                 trace("big loss direct log_lps", log_lps)
             else:
                 two_positive = log_plus(chi, 2 * chi0 - log_2)
@@ -359,8 +394,8 @@

     @staticmethod
     def find_critical_rate(
-            trace, lfit_func, min_rate, max_rate, loss_ratio_target,
-            mrr, spread):
+        trace, lfit_func, min_rate, max_rate, loss_ratio_target, mrr, spread
+    ):
         """Given a ratio target and parameters, return the offered load achieving it.

         This is basically an inverse function to lfit_func
@@ -385,7 +420,7 @@
         :type lfit_func: Function from 3 floats to float.
         :type min_rate: float
         :type max_rate: float
-        :type log_lps_target: float
+        :type loss_ratio_target: float
         :type mrr: float
         :type spread: float
         :returns: Load [pps] which achieves the target with given parameters.
@@ -397,7 +432,7 @@ class PLRsearch(object):
         loss_ratio = -1
         while loss_ratio != loss_ratio_target:
             rate = (rate_hi + rate_lo) / 2.0
-            if rate == rate_hi or rate == rate_lo:
+            if rate in (rate_hi, rate_lo):
                 break
             loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
             loss_ratio = loss_rate / rate
@@ -417,23 +452,31 @@ class PLRsearch(object):
         Integrator assumes uniform distribution, but over different parameters.
         Weight and likelihood are used interchangeably here anyway.

-        Each trial has an offered load, a duration and a loss count.
-        Fitting function is used to compute the average loss per second.
-        Poisson distribution (with average loss per trial) is used
+        Each trial has an intended load, a sent count and a loss count
+        (probably counting unsent packets as loss, as they signal
+        the load is too high for the traffic generator).
+        The fitting function is used to compute the average loss rate.
+        Geometric distribution (with average loss per trial) is used
         to get the likelihood of one trial result, the overall likelihood
         is a product of all trial likelihoods.
         As likelihoods can be extremely small, logarithms are tracked instead.

-        TODO: Copy ReceiveRateMeasurement from MLRsearch.
+        The current implementation does not use direct loss rate
+        from the fitting function, as the input and output units may not match
+        (e.g. intended load in TCP transactions, loss in packets).
+        Instead, the expected average loss is scaled according to the number
+        of packets actually sent.
+
+        TODO: Copy MeasurementResult from MLRsearch.

         :param trace: A multiprocessing-friendly logging function (closure).
         :param lfit_func: Fitting function, typically lfit_spread or lfit_erf.
-        :param result_list: List of trial measurement results.
+        :param trial_result_list: List of trial measurement results.
         :param mrr: The mrr parameter for the fitting function.
-        :param spread: The spread parameter for the fittinmg function.
+        :param spread: The spread parameter for the fitting function.
         :type trace: function (str, object) -> None
         :type lfit_func: Function from 3 floats to float.
-        :type result_list: list of MLRsearch.ReceiveRateMeasurement
+        :type trial_result_list: list of MLRsearch.MeasurementResult
        :type mrr: float
         :type spread: float
         :returns: Logarithm of result weight for given function and parameters.
@@ -443,26 +486,37 @@ class PLRsearch(object):
         trace("log_weight for mrr", mrr)
         trace("spread", spread)
         for result in trial_result_list:
-            trace("for tr", result.target_tr)
-            trace("lc", result.loss_count)
-            trace("d", result.duration)
-            log_avg_loss_per_second = lfit_func(
-                trace, result.target_tr, mrr, spread)
-            log_avg_loss_per_trial = (
-                log_avg_loss_per_second + math.log(result.duration))
-            # Poisson probability computation works nice for logarithms.
-            log_trial_likelihood = (
-                result.loss_count * log_avg_loss_per_trial
-                - math.exp(log_avg_loss_per_trial))
-            log_trial_likelihood -= math.lgamma(1 + result.loss_count)
+            trace("for tr", result.intended_load)
+            trace("plc", result.plr_loss_count)
+            trace("d", result.intended_duration)
+            # _rel_ values use units of intended_load (transactions per second).
+            log_avg_rel_loss_per_second = lfit_func(
+                trace, result.intended_load, mrr, spread
+            )
+            # _abs_ values use units of loss count (maybe packets).
+            # There can be multiple packets per transaction.
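# ---- Editor's aside (illustrative sketch, not part of the diff) ----
# The next statement rescales the fitted loss rate (in intended_load
# units, e.g. transactions per second) into an absolute per-trial loss
# (e.g. packets) via the factor offered_count / intended_load, which
# folds in both the trial duration and the packets-per-transaction
# ratio. A standalone check with made-up numbers:
import math

intended_load = 1e6        # transactions per second (hypothetical)
offered_count = 6e6        # packets actually sent in the trial (hypothetical)
rel_loss_per_second = 2.5  # fitted loss rate, in intended_load units

log_abs_loss_per_trial = math.log(rel_loss_per_second) + math.log(
    offered_count / intended_load
)
# The same quantity computed directly, outside the log domain:
assert math.isclose(
    math.exp(log_abs_loss_per_trial),
    rel_loss_per_second * offered_count / intended_load,
)
# ---- End of editor's aside. ----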
+            log_avg_abs_loss_per_trial = log_avg_rel_loss_per_second + math.log(
+                result.offered_count / result.intended_load
+            )
+            # Geometric probability computation for logarithms.
+            log_trial_likelihood = log_plus(0.0, -log_avg_abs_loss_per_trial)
+            log_trial_likelihood *= -result.plr_loss_count
+            log_trial_likelihood -= log_plus(0.0, +log_avg_abs_loss_per_trial)
             log_likelihood += log_trial_likelihood
-            trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial))
+            trace("avg_loss_per_trial", math.exp(log_avg_abs_loss_per_trial))
             trace("log_trial_likelihood", log_trial_likelihood)
         return log_likelihood

     def measure_and_compute(
-            self, trial_duration, transmit_rate, trial_result_list,
-            min_rate, max_rate, focus_trackers=(None, None), max_samples=None):
+        self,
+        trial_duration,
+        transmit_rate,
+        trial_result_list,
+        min_rate,
+        max_rate,
+        focus_trackers=(None, None),
+        max_samples=None,
+    ):
         """Perform both measurement and computation at once.

         High level steps: Prepare and launch computation worker processes,
@@ -503,7 +557,7 @@ class PLRsearch(object):
         :param max_samples: Limit for integrator samples, for debugging.
         :type trial_duration: float
         :type transmit_rate: float
-        :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+        :type trial_result_list: list of MLRsearch.MeasurementResult
         :type min_rate: float
         :type max_rate: float
         :type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
@@ -512,12 +566,11 @@ class PLRsearch(object):
         :rtype: _ComputeResult
         """
         logging.debug(
-            "measure_and_compute started with self %(self)r, trial_duration "
-            "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, "
-            "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r",
-            {"self": self, "dur": trial_duration, "tr": transmit_rate,
-             "trl": trial_result_list, "mr": max_rate, "track": focus_trackers,
-             "ms": max_samples})
+            f"measure_and_compute started with self {self!r}, trial_duration "
+            f"{trial_duration!r}, transmit_rate {transmit_rate!r}, "
+            f"trial_result_list {trial_result_list!r}, max_rate {max_rate!r}, "
+            f"focus_trackers {focus_trackers!r}, max_samples {max_samples!r}"
+        )
         # Preparation phase.
         dimension = 2
         stretch_focus_tracker, erf_focus_tracker = focus_trackers
@@ -536,15 +589,28 @@ class PLRsearch(object):
             start computation, return the boss pipe end.

             :param fitting_function: lfit_erf or lfit_stretch.
-            :param bias_avg: Tuple of floats to start searching around.
-            :param bias_cov: Covariance matrix defining initial focus shape.
+            :param focus_tracker: Tracker initialized to speed up the numeric
+                computation.
             :type fitting_function: Function from 3 floats to float.
-            :type bias_avg: 2-tuple of floats
-            :type bias_cov: 2-tuple of 2-tuples of floats
+            :type focus_tracker: None or stat_trackers.VectorStatTracker
             :returns: Boss end of communication pipe.
             :rtype: multiprocessing.Connection
             """
+            boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
+            # Starting the worker first. Contrary to the documentation
+            # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.connection.Connection
+            # sending a large object without an active listener on the other
+            # side results in a deadlock, not in a ValueError.
+            # See https://stackoverflow.com/questions/15137292/large-objects-and-multiprocessing-pipes-and-send
+            worker = multiprocessing.Process(
+                target=Integrator.try_estimate_nd,
+                args=(worker_pipe_end, 5.0, self.trace_enabled),
+            )
+            worker.daemon = True
+            worker.start()
+
+            # Only now it is safe to send the function to compute with.
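# ---- Editor's aside (illustrative sketch, not part of the diff) ----
# The ordering above (start the consumer process, then send the big
# payload) can be reproduced standalone with nothing beyond the standard
# library; echo_worker and demo are hypothetical helpers.
import multiprocessing

def echo_worker(conn):
    """Receive one object, send it back, then close the connection."""
    obj = conn.recv()
    conn.send(obj)
    conn.close()

def demo():
    parent_end, child_end = multiprocessing.Pipe()
    # Start the reader first, so a large send cannot deadlock while
    # waiting for buffer space in the OS pipe.
    worker = multiprocessing.Process(target=echo_worker, args=(child_end,))
    worker.daemon = True
    worker.start()
    parent_end.send(list(range(1_000_000)))  # large payload, like the dilled function
    print(len(parent_end.recv()))
    worker.join()

if __name__ == "__main__":
    demo()
# ---- End of editor's aside. ----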
def value_logweight_func(trace, x_mrr, x_spread): """Return log of critical rate and log of likelihood. @@ -579,27 +645,29 @@ class PLRsearch(object): mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0 spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr)) logweight = self.log_weight( - trace, fitting_function, trial_result_list, mrr, spread) - value = math.log(self.find_critical_rate( - trace, fitting_function, min_rate, max_rate, - self.packet_loss_ratio_target, mrr, spread)) + trace, fitting_function, trial_result_list, mrr, spread + ) + value = math.log( + self.find_critical_rate( + trace, + fitting_function, + min_rate, + max_rate, + self.packet_loss_ratio_target, + mrr, + spread, + ) + ) return value, logweight dilled_function = dill.dumps(value_logweight_func) - boss_pipe_end, worker_pipe_end = multiprocessing.Pipe() boss_pipe_end.send( - (dimension, dilled_function, focus_tracker, max_samples)) - worker = multiprocessing.Process( - target=Integrator.try_estimate_nd, args=( - worker_pipe_end, 10.0, self.trace_enabled)) - worker.daemon = True - worker.start() + (dimension, dilled_function, focus_tracker, max_samples) + ) return boss_pipe_end - erf_pipe = start_computing( - self.lfit_erf, erf_focus_tracker) - stretch_pipe = start_computing( - self.lfit_stretch, stretch_focus_tracker) + erf_pipe = start_computing(self.lfit_erf, erf_focus_tracker) + stretch_pipe = start_computing(self.lfit_stretch, stretch_focus_tracker) # Measurement phase. measurement = self.measurer.measure(trial_duration, transmit_rate) @@ -621,40 +689,52 @@ class PLRsearch(object): and number of samples used for this iteration. :rtype: _PartialResult """ - pipe.send(None) + # If worker encountered an exception, we get it in the recv below, + # but send will report a broken pipe. + # EAFP says we should ignore the error (instead of polling first). 
+ # https://devblogs.microsoft.com/python + # /idiomatic-python-eafp-versus-lbyl/ + try: + pipe.send(None) + except BrokenPipeError: + pass if not pipe.poll(10.0): - raise RuntimeError( - "Worker {name} did not finish!".format(name=name)) + raise RuntimeError(f"Worker {name} did not finish!") result_or_traceback = pipe.recv() try: - value_tracker, focus_tracker, debug_list, trace_list, sampls = ( - result_or_traceback) - except ValueError: + ( + value_tracker, + focus_tracker, + debug_list, + trace_list, + sampls, + ) = result_or_traceback + except ValueError as exc: raise RuntimeError( - "Worker {name} failed with the following traceback:\n{tr}" - .format(name=name, tr=result_or_traceback)) - logging.info("Logs from worker %(name)r:", {"name": name}) + f"Worker {name} failed with the following traceback:\n" + f"{result_or_traceback}" + ) from exc + logging.info(f"Logs from worker {name!r}:") for message in debug_list: logging.info(message) for message in trace_list: logging.debug(message) - logging.debug("trackers: value %(val)r focus %(foc)r", { - "val": value_tracker, "foc": focus_tracker}) + logging.debug( + f"trackers: value {value_tracker!r} focus {focus_tracker!r}" + ) return _PartialResult(value_tracker, focus_tracker, sampls) stretch_result = stop_computing("stretch", stretch_pipe) erf_result = stop_computing("erf", erf_pipe) result = PLRsearch._get_result(measurement, stretch_result, erf_result) logging.info( - "measure_and_compute finished with trial result %(res)r " - "avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r " - "new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r " - "erf samples %(es)r", - {"res": result.measurement, - "avg": result.avg, "stdev": result.stdev, - "a1": result.stretch_exp_avg, "a2": result.erf_exp_avg, - "nt": result.trackers, "ot": old_trackers, - "ss": stretch_result.samples, "es": erf_result.samples}) + f"measure_and_compute finished with trial result " + f"{result.measurement!r} avg {result.avg!r} stdev {result.stdev!r} " + f"stretch {result.stretch_exp_avg!r} erf {result.erf_exp_avg!r} " + f"new trackers {result.trackers!r} old trackers {old_trackers!r} " + f"stretch samples {stretch_result.samples!r} erf samples " + f"{erf_result.samples!r}" + ) return result @staticmethod @@ -667,7 +747,7 @@ class PLRsearch(object): :param measurement: The trial measurement obtained during computation. :param stretch_result: Computation output for stretch fitting function. :param erf_result: Computation output for erf fitting function. - :type measurement: ReceiveRateMeasurement + :type measurement: MeasurementResult :type stretch_result: _PartialResult :type erf_result: _PartialResult :returns: Combined results. @@ -692,7 +772,8 @@ class PLRsearch(object): # Named tuples, for multiple local variables to be passed as return value. _PartialResult = namedtuple( - "_PartialResult", "value_tracker focus_tracker samples") + "_PartialResult", "value_tracker focus_tracker samples" +) """Two stat trackers and sample counter. :param value_tracker: Tracker for the value (critical load) being integrated. @@ -705,7 +786,8 @@ _PartialResult = namedtuple( _ComputeResult = namedtuple( "_ComputeResult", - "measurement avg stdev stretch_exp_avg erf_exp_avg trackers") + "measurement avg stdev stretch_exp_avg erf_exp_avg trackers", +) """Measurement, 4 computation result values, pair of trackers. :param measurement: The trial measurement result obtained during computation. 
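# ---- Editor's aside (illustrative sketch, not part of the diff) ----
# Because _ComputeResult is a namedtuple, one value supports both the
# positional unpacking used in search() and the named access used in the
# logging above. A self-contained sketch with dummy values:
from collections import namedtuple

_ComputeResult = namedtuple(
    "_ComputeResult",
    "measurement avg stdev stretch_exp_avg erf_exp_avg trackers",
)

result = _ComputeResult(
    measurement=None,  # a MeasurementResult in the real code
    avg=1.25e6,
    stdev=3.1e4,
    stretch_exp_avg=1.22e6,
    erf_exp_avg=1.28e6,
    trackers=(None, None),
)
# Positional unpacking, as in search():
measurement, average, stdev, avg1, avg2, focus_trackers = result
# Named access, as in the logging calls:
assert result.avg == average and result.trackers == focus_trackers
# ---- End of editor's aside. ----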
@@ -714,7 +796,7 @@ _ComputeResult = namedtuple
 :param stretch_exp_avg: Stretch fitting function estimate average, exponentiated.
 :param erf_exp_avg: Erf fitting function estimate average, exponentiated.
 :param trackers: Pair of focus trackers to start next iteration with.
-:type measurement: ReceiveRateMeasurement
+:type measurement: MeasurementResult
 :type avg: float
 :type stdev: float
 :type stretch_exp_avg: float
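# ---- Editor's aside (illustrative sketch, not part of the diff) ----
# Sanity check for the geometric log-likelihood used in log_weight above:
# for mean per-trial loss m, the geometric distribution assigns
# P(k) = (m / (1 + m)) ** k / (1 + m), and the log-domain form built from
# log_plus must agree. Small hypothetical numbers, so the direct
# computation does not underflow; log_plus is re-derived here rather than
# imported from the real .log_plus module.
import math

def log_plus(a, b):
    """Return log(exp(a) + exp(b)), computed in a stable way."""
    return max(a, b) + math.log1p(math.exp(-abs(a - b)))

m = 0.75  # average loss per trial (hypothetical)
k = 3     # observed loss count (hypothetical)

log_m = math.log(m)
# Same shape as the diff: -k * log(1 + 1/m) - log(1 + m).
log_likelihood = -k * log_plus(0.0, -log_m) - log_plus(0.0, +log_m)
direct = (m / (1.0 + m)) ** k / (1.0 + m)
assert math.isclose(math.exp(log_likelihood), direct)
# ---- End of editor's aside. ----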