# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of Intel Corporation nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The main logic behind running autotests in parallel
from __future__ import print_function
import StringIO
import csv
import multiprocessing
import pexpect
import re
import subprocess
import sys
import time
# wait for the app to come up to an "RTE>>" prompt
def wait_prompt(child):
    try:
        child.sendline()
        result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                              timeout=120)
    except:
        return False
    # index 0 means the prompt was matched; timeout/EOF mean failure
    return result == 0
# run a test group
# each result tuple in the results list consists of:
#   result value (0 or -1)
#   result string
#   test name
#   total test run time (double)
#   raw test log
#   test report (if not available, should be None)
#
# this function needs to be outside the AutotestRunner class
# because otherwise Pool won't work (or rather, it would require
# quite a bit of effort to make it work).
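# For illustration, a successful entry might look like (values hypothetical):
#   (0, "Success", "memory_autotest", 2.3, "<raw log text>", None)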
def run_test_group(cmdline, target, test_group):
    results = []
    child = None
    start_time = time.time()
    startuplog = None

    # run the test app
    try:
        # prepare logging of init
        startuplog = StringIO.StringIO()

        print("\n%s %s\n" % ("=" * 20, test_group["Prefix"]), file=startuplog)
        print("\ncmdline=%s" % cmdline, file=startuplog)

        child = pexpect.spawn(cmdline, logfile=startuplog)

        # wait for target to boot
        if not wait_prompt(child):
            child.close()

            results.append((-1,
                            "Fail [No prompt]",
                            "Start %s" % test_group["Prefix"],
                            time.time() - start_time,
                            startuplog.getvalue(),
                            None))

            # mark all tests as failed
            for test in test_group["Tests"]:
                results.append((-1, "Fail [No prompt]", test["Name"],
                                time.time() - start_time, "", None))
            # exit test
            return results

    except:
        results.append((-1,
                        "Fail [Can't run]",
                        "Start %s" % test_group["Prefix"],
                        time.time() - start_time,
                        startuplog.getvalue(),
                        None))

        # mark all tests as failed
        for t in test_group["Tests"]:
            results.append((-1, "Fail [Can't run]", t["Name"],
                            time.time() - start_time, "", None))
        # exit test
        return results

    # startup was successful
    results.append((0, "Success", "Start %s" % test_group["Prefix"],
                    time.time() - start_time, startuplog.getvalue(), None))
    # run all tests in the test group
    for test in test_group["Tests"]:

        # create a log buffer for each test
        # in a multiprocessing environment, the logging would be
        # interleaved and would create a mess, hence the buffering
        logfile = StringIO.StringIO()
        child.logfile = logfile

        result = ()

        # make a note of when the test started
        start_time = time.time()

        try:
            # print test name to log buffer
            print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)

            # run test function associated with the test
            result = test["Func"](child, test["Command"])

            # make a note of when the test finished
            end_time = time.time()

            log = logfile.getvalue()

            # append test data to the result tuple
            result += (test["Name"], end_time - start_time, log)

            # call the report function, if any is defined, and supply it
            # with the target and the complete log for the test run
            if test["Report"]:
                report = test["Report"](target, log)

                # append report to results tuple
                result += (report,)
            else:
                # report is None
                result += (None,)
        except:
            # make a note of when the test crashed
            end_time = time.time()

            # mark test as failed
            result = (-1, "Fail [Crash]", test["Name"],
                      end_time - start_time, logfile.getvalue(), None)

        # append the results to the results list
        results.append(result)

    # regardless of whether the test has crashed, try quitting it
    try:
        child.sendline("quit")
        child.close()
    # if the test crashed, just do nothing instead
    except:
        pass

    # return test results
    return results
# class representing an instance of an autotests run
class AutotestRunner:

    def __init__(self, cmdline, target, blacklist, whitelist):
        self.cmdline = cmdline
        self.target = target
        self.binary = cmdline.split()[0]
        self.blacklist = blacklist
        self.whitelist = whitelist

        # mutable state is kept per-instance rather than on the class,
        # so that separate runners don't share test groups or results
        self.parallel_test_groups = []
        self.non_parallel_test_groups = []
        self.skipped = []
        self.log_buffers = []
        self.n_tests = 0
        self.fails = 0
        self.start = None

        # log and CSV file names are derived from the target
        logfile = "%s.log" % target
        csvfile = "%s.csv" % target

        self.logfile = open(logfile, "w")
        csvfile = open(csvfile, "w")
        self.csvwriter = csv.writer(csvfile)

        # prepare results table
        self.csvwriter.writerow(["test_name", "test_result", "result_str"])
    # set up cmdline string
    def __get_cmdline(self, test):
        cmdline = self.cmdline

        # append memory limitations for each test
        # otherwise tests won't run in parallel
        if "i686" not in self.target:
            cmdline += " --socket-mem=%s" % test["Memory"]
        else:
            # affinitize startup so that tests don't fail on i686
            cmdline = "taskset 1 " + cmdline
            cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))

        # set group prefix for autotest group
        # otherwise they won't run in parallel
        cmdline += " --file-prefix=%s" % test["Prefix"]

        return cmdline
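    # As a sketch (values hypothetical): for a non-i686 target, a group with
    # Memory "32" and Prefix "group_1" would turn "./app/test -c f -n 4" into
    # "./app/test -c f -n 4 --socket-mem=32 --file-prefix=group_1".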
    def add_parallel_test_group(self, test_group):
        self.parallel_test_groups.append(test_group)

    def add_non_parallel_test_group(self, test_group):
        self.non_parallel_test_groups.append(test_group)
    def __process_results(self, results):
        # this iterates over individual test results
        for i, result in enumerate(results):

            # increase total number of tests that were run
            # do not include "start" test
            if i > 0:
                self.n_tests += 1

            # unpack result tuple
            test_result, result_str, test_name, \
                test_time, log, report = result

            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)

            # print results, test run time and total time since start
            result = ("%s:" % test_name).ljust(30)
            result += result_str.ljust(29)
            result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)

            # don't print out total time on every line, it's the same anyway
            if i == len(results) - 1:
                print(result,
                      "[%02dm %02ds]" % (total_time / 60, total_time % 60))
            else:
                print(result)

            # if test failed and it wasn't a "start" test
            if test_result < 0 and not i == 0:
                self.fails += 1

            # collect logs
            self.log_buffers.append(log)

            # create report file if a report exists
            if report is not None:
                try:
                    f = open("%s_%s_report.rst" %
                             (self.target, test_name), "w")
                    f.write(report)
                    f.close()
                except IOError:
                    print("Report for %s could not be created!" % test_name)

            # write test result to CSV file
            self.csvwriter.writerow([test_name, test_result, result_str])
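    # A resulting CSV row is "test_name,test_result,result_str", e.g.
    # (hypothetical) "memory_autotest,0,Success".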
    # this function checks an individual test and decides whether it should
    # be in the group, by comparing it against the whitelist/blacklist. it
    # also checks if the test is compiled into the binary, and marks it as
    # skipped if necessary
    def __filter_test(self, test):
        test_cmd = test["Command"]
        test_id = test_cmd

        # dump tests are specified in full, e.g. "Dump_mempool"
        if "_autotest" in test_id:
            test_id = test_id[:-len("_autotest")]

        # filter out blacklisted/whitelisted tests
        if self.blacklist and test_id in self.blacklist:
            return False
        if self.whitelist and test_id not in self.whitelist:
            return False

        # if the test wasn't compiled in, remove it as well

        # parse the binary for available test commands
        stripped = 'not stripped' not in \
                   subprocess.check_output(['file', self.binary])
        if not stripped:
            symbols = subprocess.check_output(['nm',
                                               self.binary]).decode('utf-8')
            avail_cmds = re.findall(r'test_register_(\w+)', symbols)

            if test_cmd not in avail_cmds:
                # mark test as skipped and filter it out
                result = 0, "Skipped [Not compiled]", test_id, 0, "", None
                self.skipped.append(tuple(result))
                return False

        return True
    def __filter_group(self, group):
        group["Tests"] = list(filter(self.__filter_test, group["Tests"]))
        return len(group["Tests"]) > 0
    # iterate over test groups and run tests associated with them
    def run_all_tests(self):
        # for each test group, check all tests against the filter, then
        # remove all groups that don't have any tests left
        self.parallel_test_groups = list(
            filter(self.__filter_group,
                   self.parallel_test_groups)
        )
        self.non_parallel_test_groups = list(
            filter(self.__filter_group,
                   self.non_parallel_test_groups)
        )

        # create a pool of worker processes
        pool = multiprocessing.Pool(processes=1)

        results = []
        # whatever happens, try to save as many logs as possible
        try:

            # create table header
            print("")
            print("Test name".ljust(30) + "Test result".ljust(29) +
                  "Test".center(9) + "Total".center(9))
            print("=" * 80)

            # print out skipped autotests if there were any
            if len(self.skipped):
                print("Skipped autotests:")

                # print out any skipped tests
                for result in self.skipped:
                    # unpack result tuple
                    test_result, result_str, test_name, _, _, _ = result
                    self.csvwriter.writerow([test_name, test_result,
                                             result_str])

                    t = ("%s:" % test_name).ljust(30)
                    t += result_str.ljust(29)
                    t += "[00m 00s]"

                    print(t)
            # make a note of the tests' start time
            self.start = time.time()

            print("Parallel autotests:")
            # assign worker processes to run test groups
            for test_group in self.parallel_test_groups:
                result = pool.apply_async(run_test_group,
                                          [self.__get_cmdline(test_group),
                                           self.target,
                                           test_group])
                results.append(result)

            # iterate while we have group execution results to get
            while len(results) > 0:

                # iterate over a copy to be able to safely delete results
                # this iterates over a list of group results
                for group_result in results[:]:

                    # if the worker hasn't finished yet, continue
                    if not group_result.ready():
                        continue

                    res = group_result.get()

                    self.__process_results(res)

                    # remove result from results list once we're done with it
                    results.remove(group_result)
            print("Non-parallel autotests:")
            # run non-parallel tests. they are run one by one, synchronously
            for test_group in self.non_parallel_test_groups:
                group_result = run_test_group(
                    self.__get_cmdline(test_group), self.target, test_group)

                self.__process_results(group_result)

            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)

            # print out summary
            print("=" * 80)
            print("Total run time: %02dm %02ds" % (total_time / 60,
                                                   total_time % 60))
            if self.fails != 0:
                print("Number of failed tests: %s" % str(self.fails))

            # write summary to logfile
            self.logfile.write("Summary\n")
            self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
            self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
            self.logfile.write("Failed tests: ".ljust(
                15) + "%i\n" % self.fails)
        except:
            print("Exception occurred")
            print(sys.exc_info())

            self.fails = 1

        # drop logs from all executions to the logfile
        for buf in self.log_buffers:
            self.logfile.write(buf.replace("\r", ""))
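# Example usage (a minimal sketch; the command line, target string and test
# group layout below are hypothetical, following the dict keys used above):
#
#   runner = AutotestRunner("./app/test -c f -n 4",
#                           "x86_64-native-linuxapp-gcc",
#                           blacklist=[], whitelist=[])
#   runner.add_parallel_test_group({
#       "Prefix": "group_1",
#       "Memory": "32",
#       "Tests": [{"Name": "Memory autotest",
#                  "Command": "memory_autotest",
#                  "Func": some_test_func,   # hypothetical test function
#                  "Report": None}],
#   })
#   runner.run_all_tests()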