X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FPLRsearch%2FPLRsearch.py;h=0e78cc936d6ddcbaaccb680408d1ce094dda7e1d;hb=d07f6cae7f18c1513650d4cb690115d60201e704;hp=cdfd3081494ac9c246162e60d6011029bcb2165f;hpb=b6fbffad32515ccf94404680cb5280c2cb561af5;p=csit.git

diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py
index cdfd308149..0e78cc936d 100644
--- a/resources/libraries/python/PLRsearch/PLRsearch.py
+++ b/resources/libraries/python/PLRsearch/PLRsearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -426,13 +426,21 @@ class PLRsearch:
         Integrator assumes uniform distribution, but over different parameters.
         Weight and likelihood are used interchangeably here anyway.
 
-        Each trial has an offered load, a duration and a loss count.
-        Fitting function is used to compute the average loss per second.
-        Poisson distribution (with average loss per trial) is used
+        Each trial has an intended load, a sent count and a loss count
+        (probably counting unsent packets as loss, as they signal
+        the load is too high for the traffic generator).
+        The fitting function is used to compute the average loss rate.
+        Geometric distribution (with average loss per trial) is used
         to get likelihood of one trial result, the overal likelihood
         is a product of all trial likelihoods.
         As likelihoods can be extremely small, logarithms are tracked instead.
 
+        The current implementation does not use direct loss rate
+        from the fitting function, as the input and output units may not match
+        (e.g. intended load in TCP transactions, loss in packets).
+        Instead, the expected average loss is scaled according to the number
+        of packets actually sent.
+
         TODO: Copy ReceiveRateMeasurement from MLRsearch.
 
         :param trace: A multiprocessing-friendly logging function (closure).
@@ -556,6 +564,20 @@ class PLRsearch:
         :rtype: multiprocessing.Connection
         """
+        boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
+        # Starting the worker first. Contrary to documentation
+        # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.connection.Connection
+        # sending of large object without active listener on the other side
+        # results in a deadlock, not in a ValueError.
+        # See https://stackoverflow.com/questions/15137292/large-objects-and-multiprocessing-pipes-and-send
+        worker = multiprocessing.Process(
+            target=Integrator.try_estimate_nd,
+            args=(worker_pipe_end, 10.0, self.trace_enabled)
+        )
+        worker.daemon = True
+        worker.start()
+
+        # Only now it is safe to send the function to compute with.
 
         def value_logweight_func(trace, x_mrr, x_spread):
             """Return log of critical rate and log of likelihood.
 
@@ -601,15 +623,6 @@ class PLRsearch:
             return value, logweight
 
         dilled_function = dill.dumps(value_logweight_func)
-        boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
-        # Do not send yet, run the worker first to avoid a deadlock.
-        # See https://stackoverflow.com/a/15716500
-        worker = multiprocessing.Process(
-            target=Integrator.try_estimate_nd,
-            args=(worker_pipe_end, 10.0, self.trace_enabled)
-        )
-        worker.daemon = True
-        worker.start()
         boss_pipe_end.send(
            (dimension, dilled_function, focus_tracker, max_samples)
         )
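
The updated docstring describes the likelihood model only in prose: a geometric distribution parametrized by the expected average loss of a trial, with logarithms summed instead of likelihoods multiplied. The following is a rough standalone sketch of that description, not code from this module; the helper names and the exact parametrization (geometric distribution over non-negative loss counts with mean m, so P(k) = (1/(m+1)) * (m/(m+1))**k) are assumptions made for illustration.

    import math

    def trial_logweight(average_loss, loss_count):
        """Return log-likelihood of seeing loss_count losses in one trial.

        Illustrative only: assumes a geometric distribution over
        {0, 1, 2, ...} with mean m = average_loss,
        so P(k) = (1 / (m + 1)) * (m / (m + 1)) ** k.
        Working in the log domain avoids underflow for unlikely results.
        """
        if average_loss <= 0.0:
            # Degenerate model: zero expected loss tolerates only zero losses.
            return 0.0 if loss_count == 0 else float("-inf")
        log_m1 = math.log(average_loss + 1.0)
        return -log_m1 + loss_count * (math.log(average_loss) - log_m1)

    def total_logweight(trials):
        """Sum per-trial logs, i.e. log of the product of trial likelihoods."""
        return sum(trial_logweight(avg, lost) for avg, lost in trials)

    # Example: two trials with expected average losses of 2.5 and 0.1 packets.
    print(total_logweight([(2.5, 3), (0.1, 0)]))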
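
The moved block in the last two hunks encodes a single ordering rule: create the pipe and start the worker process before sending the large (dilled) payload, because pushing a big object into a pipe with no active reader blocks in send() rather than raising a ValueError. Below is a standalone sketch of that ordering under the same assumption, using a dummy worker_main in place of Integrator.try_estimate_nd; all names are illustrative, not the repository's API.

    import multiprocessing

    def worker_main(pipe_end):
        """Receive one (possibly large) payload and reply with its size."""
        payload = pipe_end.recv()
        pipe_end.send(len(payload))

    if __name__ == "__main__":
        boss_end, worker_end = multiprocessing.Pipe()
        # Start the reader first: a large send() with no receiver on the
        # other side can block once the OS pipe buffer fills up.
        worker = multiprocessing.Process(target=worker_main, args=(worker_end,))
        worker.daemon = True
        worker.start()
        boss_end.send(b"x" * 10_000_000)  # safe now, the worker is receiving
        print(boss_end.recv())
        worker.join()

Marking the process as daemon mirrors the diff: if the boss exits early, the helper process is not left running.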