#!/usr/bin/python

#   BSD LICENSE
#
#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
#   All rights reserved.
#
#   Redistribution and use in source and binary forms, with or without
#   modification, are permitted provided that the following conditions
#   are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in
#       the documentation and/or other materials provided with the
#       distribution.
#     * Neither the name of Intel Corporation nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The main logic behind running autotests in parallel

import multiprocessing
import subprocess
import sys
import pexpect
import re
import time
import os
import StringIO
import csv

# wait for the test app to give us its command prompt ("RTE>>")
def wait_prompt(child):
        try:
                child.sendline()
                # expect() returns the index of the pattern that matched:
                # 0 is the prompt, 1 and 2 are timeout and EOF respectively
                result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                        timeout=120)
        except:
                return False
        return result == 0

# run a test group
# each result tuple in the results list consists of:
#   result value (0 or -1)
#   result string
#   test name
#   total test run time (double)
#   raw test log
#   test report (if not available, should be None)
#
# this function needs to live outside the AutotestRunner class because
# otherwise multiprocessing.Pool won't work (Python 2 cannot pickle
# instance methods without quite a bit of extra effort).
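#
# an example test group, in the shape produced by autotest_data.py
# (the field names are the ones this function actually reads; the
# values here are purely illustrative):
#   {"Prefix": "group_1", "Memory": "32",
#    "Tests": [{"Name": "Timer autotest", "Command": "timer_autotest",
#               "Func": default_autotest, "Report": None}]}
# a passing test would then yield a result tuple such as:
#   (0, "Success", "Timer autotest", 3.2, "<captured log>", None)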
def run_test_group(cmdline, target, test_group):
        results = []
        child = None
        start_time = time.time()
        startuplog = None

        # run test app
        try:
                # prepare logging of init
                startuplog = StringIO.StringIO()

                print >>startuplog, "\n%s %s\n" % ("="*20, test_group["Prefix"])
                print >>startuplog, "\ncmdline=%s" % cmdline

                child = pexpect.spawn(cmdline, logfile=startuplog)

                # wait for target to boot
                if not wait_prompt(child):
                        child.close()

                        results.append((-1, "Fail [No prompt]",
                                "Start %s" % test_group["Prefix"],
                                time.time() - start_time, startuplog.getvalue(), None))

                        # mark all tests as failed
                        for test in test_group["Tests"]:
                                results.append((-1, "Fail [No prompt]", test["Name"],
                                        time.time() - start_time, "", None))
                        # exit test
                        return results

        except:
                results.append((-1, "Fail [Can't run]",
                        "Start %s" % test_group["Prefix"],
                        time.time() - start_time, startuplog.getvalue(), None))

                # mark all tests as failed
                for t in test_group["Tests"]:
                        results.append((-1, "Fail [Can't run]", t["Name"],
                                time.time() - start_time, "", None))
                # exit test
                return results

        # startup was successful
        results.append((0, "Success", "Start %s" % test_group["Prefix"],
                time.time() - start_time, startuplog.getvalue(), None))

        # parse the binary for available test commands
        binary = cmdline.split()[0]
        stripped = 'not stripped' not in subprocess.check_output(['file', binary])
        if not stripped:
                symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
                avail_cmds = re.findall(r'test_register_(\w+)', symbols)

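        # as an illustration, 'nm' output for an unstripped test binary is
        # expected to contain lines roughly like
        #   00000000004f9d30 T test_register_timer_autotest
        # so avail_cmds would then contain e.g. 'timer_autotest'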
        # run all tests in test group
        for test in test_group["Tests"]:

                # create a log buffer for each test
                # in a multiprocessing environment, the logging would be
                # interleaved and would create a mess, hence the buffering
                logfile = StringIO.StringIO()
                child.logfile = logfile

                result = ()

                # make a note of when the test started
                start_time = time.time()

                try:
                        # print test name to log buffer
                        print >>logfile, "\n%s %s\n" % ("-"*20, test["Name"])

                        # run test function associated with the test
                        if stripped or test["Command"] in avail_cmds:
                                result = test["Func"](child, test["Command"])
                        else:
                                result = (0, "Skipped [Not Available]")

                        # make a note of when the test finished
                        end_time = time.time()

                        # append test data to the result tuple
                        result += (test["Name"], end_time - start_time,
                                logfile.getvalue())

                        # call report function, if one is defined, and supply
                        # it with the target and the complete test run log
                        if test["Report"]:
                                report = test["Report"](target, logfile.getvalue())

                                # append report to results tuple
                                result += (report,)
                        else:
                                # report is None
                                result += (None,)
                except:
                        # make a note of when the test crashed
                        end_time = time.time()

                        # mark test as failed
                        result = (-1, "Fail [Crash]", test["Name"],
                                end_time - start_time, logfile.getvalue(), None)
                finally:
                        # append the results to the results list
                        results.append(result)

        # regardless of whether the test app crashed, try quitting it
        try:
                child.sendline("quit")
                child.close()
        # if the test app crashed, just do nothing instead
        except:
                # nop
                pass

        # return test results
        return results

# class representing an instance of autotests run
class AutotestRunner:
        def __init__(self, cmdline, target, blacklist, whitelist):
                self.cmdline = cmdline
                self.target = target
                self.blacklist = blacklist
                self.whitelist = whitelist

                # initialize mutable state per instance rather than as
                # class attributes, so that instances don't share lists
                self.parallel_test_groups = []
                self.non_parallel_test_groups = []
                self.log_buffers = []
                self.start = None
                self.n_tests = 0
                self.fails = 0

                # log and CSV result files are named after the target
                logfile = "%s.log" % target
                csvfile = "%s.csv" % target

                self.logfile = open(logfile, "w")
                self.csvwriter = csv.writer(open(csvfile, "w"))

                # prepare results table
                self.csvwriter.writerow(["test_name", "test_result", "result_str"])

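        # as an example, for a base cmdline of "./app/test" and a test
        # group with Prefix "group_1" and Memory "32", the string built
        # below would look something like
        #   ./app/test --socket-mem=32 --file-prefix=group_1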
        # set up cmdline string
        def __get_cmdline(self, test):
                cmdline = self.cmdline

                # append memory limitations for each test
                # otherwise tests won't run in parallel
                if "i686" not in self.target:
                        cmdline += " --socket-mem=%s" % test["Memory"]
                else:
                        # affinitize startup so that tests don't fail on i686
                        cmdline = "taskset 1 " + cmdline
                        cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))

                # set group prefix for autotest group
                # otherwise they won't run in parallel
                cmdline += " --file-prefix=%s" % test["Prefix"]

                return cmdline

        def add_parallel_test_group(self, test_group):
                self.parallel_test_groups.append(test_group)

        def add_non_parallel_test_group(self, test_group):
                self.non_parallel_test_groups.append(test_group)

        def __process_results(self, results):
                # this iterates over individual test results
                for i, result in enumerate(results):

                        # increase total number of tests that were run
                        # do not include "start" test
                        if i > 0:
                                self.n_tests += 1

                        # unpack result tuple
                        test_result, result_str, test_name, \
                                test_time, log, report = result

                        # get total run time
                        cur_time = time.time()
                        total_time = int(cur_time - self.start)

                        # print results, test run time and total time since start
                        print ("%s:" % test_name).ljust(30),
                        print result_str.ljust(29),
                        print "[%02dm %02ds]" % (test_time / 60, test_time % 60),

                        # don't print out total time on every line, it's the same anyway
                        if i == len(results) - 1:
                                print "[%02dm %02ds]" % (total_time / 60, total_time % 60)
                        else:
                                print ""

                        # if test failed and it wasn't a "start" test
                        if test_result < 0 and i != 0:
                                self.fails += 1

                        # collect logs
                        self.log_buffers.append(log)

                        # write out a report file if the test produced one
                        if report:
                                try:
                                        f = open("%s_%s_report.rst" %
                                                (self.target, test_name), "w")
                                except IOError:
                                        print "Report for %s could not be created!" % test_name
                                else:
                                        with f:
                                                f.write(report)

                        # write test result to CSV file
                        # (again excluding the "start" pseudo-test)
                        if i != 0:
                                self.csvwriter.writerow([test_name, test_result, result_str])

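        # given the ljust widths above, a typical row printed by
        # __process_results might look like:
        #
        #   timer_autotest:                Success                       [00m 05s]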
        # this function iterates over test groups and removes each
        # test that is not in the whitelist/blacklist
        def __filter_groups(self, test_groups):
                groups_to_remove = []

                # filter out tests from parallel test groups
                for i, test_group in enumerate(test_groups):

                        # iterate over a copy so that we can safely delete individual tests
                        for test in test_group["Tests"][:]:
                                test_id = test["Command"]

                                # dump tests are specified in full, e.g. "Dump_mempool"
                                if "_autotest" in test_id:
                                        test_id = test_id[:-len("_autotest")]

                                # filter out blacklisted/whitelisted tests
                                if self.blacklist and test_id in self.blacklist:
                                        test_group["Tests"].remove(test)
                                        continue
                                if self.whitelist and test_id not in self.whitelist:
                                        test_group["Tests"].remove(test)
                                        continue

                        # modify or remove original group
                        if len(test_group["Tests"]) > 0:
                                test_groups[i] = test_group
                        else:
                                # remember which groups should be deleted
                                # put the numbers backwards so that we start
                                # deleting from the end, not from the beginning
                                groups_to_remove.insert(0, i)

                # remove test groups that need to be removed
                for i in groups_to_remove:
                        del test_groups[i]

                return test_groups

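        # e.g. with blacklist=["timer"], a test whose Command is
        # "timer_autotest" would be filtered out, since the "_autotest"
        # suffix is stripped before the comparison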
        # iterate over test groups and run tests associated with them
        def run_all_tests(self):
                # filter groups
                self.parallel_test_groups = \
                        self.__filter_groups(self.parallel_test_groups)
                self.non_parallel_test_groups = \
                        self.__filter_groups(self.non_parallel_test_groups)

                # create a pool of worker processes
                pool = multiprocessing.Pool(processes=1)

                results = []

                # whatever happens, try to save as many logs as possible
                try:

                        # create table header
                        print ""
                        print "Test name".ljust(30),
                        print "Test result".ljust(29),
                        print "Test".center(9),
                        print "Total".center(9)
                        print "=" * 80

                        # make a note of tests start time
                        self.start = time.time()

                        # assign worker processes to run test groups
                        for test_group in self.parallel_test_groups:
                                result = pool.apply_async(run_test_group,
                                        [self.__get_cmdline(test_group),
                                         self.target, test_group])
                                results.append(result)

                        # iterate while we have group execution results to get
                        while len(results) > 0:

                                # iterate over a copy to be able to safely delete results
                                # this iterates over a list of group results
                                for group_result in results[:]:

                                        # if the worker hasn't finished yet, continue
                                        if not group_result.ready():
                                                continue

                                        res = group_result.get()

                                        self.__process_results(res)

                                        # remove result from results list once we're done with it
                                        results.remove(group_result)

                                # don't spin at 100% CPU while workers run
                                time.sleep(0.25)

                        # run non-parallel tests. they are run one by one, synchronously
                        for test_group in self.non_parallel_test_groups:
                                group_result = run_test_group(
                                        self.__get_cmdline(test_group),
                                        self.target, test_group)

                                self.__process_results(group_result)

                        # get total run time
                        cur_time = time.time()
                        total_time = int(cur_time - self.start)

                        # print out summary
                        print "=" * 80
                        print "Total run time: %02dm %02ds" % (total_time / 60, total_time % 60)
                        if self.fails != 0:
                                print "Number of failed tests: %s" % str(self.fails)

                        # write summary to logfile
                        self.logfile.write("Summary\n")
                        self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
                        self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
                        self.logfile.write("Failed tests: ".ljust(15) + "%i\n" % self.fails)
                except:
                        print "Exception occurred"
                        print sys.exc_info()
                        self.fails = 1

                # drop logs from all executions to a logfile
                for buf in self.log_buffers:
                        self.logfile.write(buf.replace("\r", ""))

                self.log_buffers = []

                return self.fails
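
# A minimal usage sketch (this runner is normally driven by autotest.py,
# which supplies the cmdline, target and test group definitions; the
# values below are purely illustrative):
#
#   runner = AutotestRunner("./app/test", "x86_64-native-linuxapp-gcc",
#                           blacklist=[], whitelist=[])
#   runner.add_parallel_test_group(test_group)
#   runner.run_all_tests()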