Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame^] | 1 | #!/usr/bin/env python2 |
| 2 | """Generate summary report for ChromeOS toolchain waterfalls.""" |
| 3 | |
| 4 | # Desired future features (to be added): |
| 5 | # - arguments to allow generating only the main waterfall report, |
| 6 | # or only the rotating builder reports, or only the failures |
| 7 | # report; or the waterfall reports without the failures report. |
| 8 | # - Better way of figuring out which dates/builds to generate |
| 9 | # reports for: probably an argument specifying a date or a date |
| 10 | # range, then use something like the new buildbot utils to |
| 11 | # query the build logs to find the right build numbers for the |
| 12 | # builders for the specified dates. |
| 13 | # - Store/get the json/data files in mobiletc-prebuild's x20 area. |
| 14 | # - Update data in json file to reflect, for each testsuite, which |
| 15 | # tests are not expected to run on which boards; update this |
| 16 | # script to use that data appropriately. |
| 17 | # - Make sure user's prodaccess is up-to-date before trying to use |
| 18 | # this script. |
| 19 | # - Add some nice formatting/highlighting to reports. |
| 20 | |
| 21 | from __future__ import print_function |
| 22 | |
| 23 | import json |
| 24 | import os |
| 25 | import sys |
| 26 | import time |
| 27 | |
| 28 | from cros_utils import command_executer |
| 29 | |
# All the test suites whose data we might want for the reports.
TESTS = (
    ('bvt-inline', 'HWTest'),
    ('bvt-cq', 'HWTest'),
    ('toolchain-tests', 'HWTest'),
    ('security', 'HWTest'),
    ('kernel_daily_regression', 'HWTest'),
    ('kernel_daily_benchmarks', 'HWTest'),)

# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
# LISTED IN THE REPORT.
WATERFALL_BUILDERS = [
    'amd64-gcc-toolchain', 'arm-gcc-toolchain', 'arm64-gcc-toolchain',
    'x86-gcc-toolchain', 'amd64-llvm-toolchain', 'arm-llvm-toolchain',
    'arm64-llvm-toolchain', 'x86-llvm-toolchain', 'amd64-llvm-next-toolchain',
    'arm-llvm-next-toolchain', 'arm64-llvm-next-toolchain',
    'x86-llvm-next-toolchain'
]

# Role account that owns the persistent report data directory.
ROLE_ACCOUNT = 'mobiletc-prebuild'
# x20 directory holding the json/data files used across report runs.
DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
# Local scratch directory into which build logs are downloaded.
DOWNLOAD_DIR = '/tmp/waterfall-logs'
# Maximum number of dated records kept per test/board and per failure entry.
MAX_SAVE_RECORDS = 5
# File recording the last build number for which a report was generated,
# one 'builder,buildnum' line per builder (see UpdateBuilds/GetBuilds).
BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR
# Tryserver builders that rotate across boards; the board name must be
# recovered from each build's logs (see ParseLogFile/UpdateReport).
ROTATING_BUILDERS = ['gcc_toolchain', 'llvm_toolchain']

# For int-to-string date conversion. Note, the index of the month in this
# list needs to correspond to the month's integer value. i.e. 'Sep' must
# be as MONTHS[9].
MONTHS = [
    '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
    'Nov', 'Dec'
]
| 63 | |
| 64 | |
def format_date(int_date):
  """Convert an integer date to a string date. YYYYMMDD -> YYYY-MMM-DD

  Args:
    int_date: date as an integer, e.g. 20161118; 0 means 'today'.

  Returns:
    A human-readable date string, e.g. '2016-Nov-18'.
  """

  if int_date == 0:
    return 'today'

  # Use divmod (floor division) so the arithmetic stays integral under both
  # Python 2 and Python 3; plain '/' is float division in Python 3.
  tmp_date, day = divmod(int_date, 100)
  year, month = divmod(tmp_date, 100)

  month_str = MONTHS[month]
  date_str = '%d-%s-%d' % (year, month_str, day)
  return date_str
| 80 | |
| 81 | |
def GetValueIfExists(dictionary, keyval, unknown_value='[??/ ?? /??]'):
  """Returns value from dictionary, if it's already there.

  Check dictionary to see if keyval is in it, and if so
  return the corresponding value; otherwise return the string
  for an unknown value.

  Args:
    dictionary: dict to look in.
    keyval: key to look up.
    unknown_value: value to return when keyval is absent.

  Returns:
    dictionary[keyval] if present, else unknown_value.
  """
  # dict.get performs the membership test and the lookup in one step.
  return dictionary.get(keyval, unknown_value)
| 93 | |
| 94 | |
def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date):
  """Write out the actual formatted report.

  Args:
    report_dict: per-builder test summaries as built by UpdateReport; also
      holds the list of report dates under the 'date' key.
    fail_dict: per-suite/per-test failure history (see RecordFailures).
    waterfall_type: 'main' or 'rotating'; selects which builders to list.
    date: integer date (YYYYMMDD) used in the report file name.
  """

  filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)

  # Builders may start/finish on different days, so the report can cover
  # several dates.
  date_string = ', '.join(report_dict['date'])

  if waterfall_type == 'main':
    # The main waterfall has a fixed, preferred listing order.
    report_list = WATERFALL_BUILDERS
  else:
    # For the rotating waterfall, list whatever builders we have data for.
    # Exclude the 'date' bookkeeping key, which is not a builder entry and
    # would otherwise be rendered as a bogus builder row.
    report_list = [k for k in report_dict.keys() if k != 'date']

  with open(filename, 'w') as out_file:
    # Write Report Header
    out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
                   (waterfall_type, date_string))
    out_file.write(' '
                   ' kernel kernel\n')
    out_file.write(' Build bvt- bvt-cq '
                   'toolchain- security daily daily\n')
    out_file.write(' status inline '
                   ' tests regression benchmarks\n')
    out_file.write(' [P/ F/ DR]* [P/ F /DR]* '
                   '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n')

    # Write daily waterfall status section.
    for builder in report_list:
      if builder not in report_dict:
        out_file.write('Unable to find information for %s.\n\n' % builder)
        continue

      build_dict = report_dict[builder]
      status = GetValueIfExists(build_dict, 'build_status', unknown_value='bad')
      inline = GetValueIfExists(build_dict, 'bvt-inline')
      cq = GetValueIfExists(build_dict, 'bvt-cq')
      if 'x86' not in builder:
        # Non-x86 builders also run the toolchain and security suites.
        toolchain = GetValueIfExists(build_dict, 'toolchain-tests')
        security = GetValueIfExists(build_dict, 'security')
        if 'gcc' in builder:
          # Only the gcc builders run the kernel daily suites.
          regression = GetValueIfExists(build_dict, 'kernel_daily_regression')
          bench = GetValueIfExists(build_dict, 'kernel_daily_benchmarks')
          out_file.write('%25s %3s %s %s %s %s %s %s\n' % (builder, status,
                                                           inline, cq,
                                                           toolchain, security,
                                                           regression, bench))
        else:
          out_file.write('%25s %3s %s %s %s %s\n' % (builder, status, inline,
                                                     cq, toolchain, security))
      else:
        out_file.write('%25s %3s %s %s\n' % (builder, status, inline, cq))
      if 'build_link' in build_dict:
        out_file.write('%s\n\n' % build_dict['build_link'])

    out_file.write('\n\n*P = Number of tests in suite that Passed; F = '
                   'Number of tests in suite that Failed; DR = Number of tests'
                   ' in suite that Didn\'t Run.\n')

    # Write failure report section.
    out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string)

    # We want to sort the errors and output them in order of the ones that
    # occur most often.  So we have to collect the data about all of them,
    # then sort it.
    error_groups = []
    for suite, suite_dict in fail_dict.items():
      if suite_dict:
        for test, test_dict in suite_dict.items():
          for err_msg, err_list in test_dict.items():
            # Most recent occurrences first within each error group.
            sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)
            error_groups.append(
                [len(sorted_list), suite, test, err_msg, sorted_list])

    # Sort the errors by the number of errors of each type. Then output them
    # in order.
    sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)
    for _, suite, test, err_msg, err_list in sorted_errors:
      out_file.write('Suite: %s\n' % suite)
      out_file.write(' %s (%d failures)\n' % (test, len(err_list)))
      out_file.write(' (%s)\n' % err_msg)
      for err in err_list:
        out_file.write(' %s, %s, %s\n' % (format_date(err[0]), err[1],
                                          err[2]))
      out_file.write('\n')

  print('Report generated in %s.' % filename)
| 199 | |
| 200 | |
def UpdateReport(report_dict, builder, test, report_date, build_link,
                 test_summary, board):
  """Update the data in our report dictionary with current test's data."""

  # Builders may start/finish on different days, so keep every distinct
  # report date we encounter.
  dates = report_dict.setdefault('date', [])
  if report_date not in dates:
    dates.append(report_date)

  # Rotating builders are keyed by board+flavor; fixed builders by name.
  if builder == 'gcc_toolchain':
    build_key = '%s-gcc-toolchain' % board
  elif builder == 'llvm_toolchain':
    build_key = '%s-llvm-toolchain' % board
  else:
    build_key = builder

  # Fetch the existing record for this build, or start a fresh one.
  build_dict = report_dict.get(build_key, {})

  # Only record the first build link / date we see for this build.
  build_dict.setdefault('build_link', build_link)
  build_dict.setdefault('date', report_date)

  if 'board' in build_dict and build_dict['board'] != board:
    raise RuntimeError('Error: Two different boards (%s,%s) in one build (%s)!'
                       % (board, build_dict['board'], build_link))
  build_dict['board'] = board

  # Remember any build status previously recorded for this build_key; it is
  # kept unless the current test failed (see below).
  prior_status = build_dict.get('build_status', '')

  if not test_summary:
    # Current test data was not available, so something was bad with build.
    build_dict['build_status'] = 'bad'
    build_dict[test] = '[ no data ]'
  else:
    build_dict[test] = test_summary
    if not prior_status:
      # Current test ok; no other data, so assume build was ok.
      build_dict['build_status'] = 'ok'

  report_dict[build_key] = build_dict
| 253 | |
| 254 | |
def UpdateBuilds(builds):
  """Update the data in our build-data.txt file.

  The build data file records the last build number for which we
  generated a report.  When we generate the next report, we read
  this data and increment it to get the new data; when we finish
  generating the reports, we write the updated values into this file.
  NOTE: One side effect of doing this at the end: If the script
  fails in the middle of generating a report, this data does not get
  updated.
  """

  # Track the highest build number seen for each rotating builder; all
  # other builders are written straight through, then the rotating
  # builders' maxima are appended (gcc first, then llvm).
  rotating_max = {'gcc_toolchain': 0, 'llvm_toolchain': 0}
  with open(BUILD_DATA_FILE, 'w') as fp:
    for builder, buildnum in builds:
      if builder in rotating_max:
        rotating_max[builder] = max(rotating_max[builder], buildnum)
      else:
        fp.write('%s,%d\n' % (builder, buildnum))
    if rotating_max['gcc_toolchain'] > 0:
      fp.write('gcc_toolchain,%d\n' % rotating_max['gcc_toolchain'])
    if rotating_max['llvm_toolchain'] > 0:
      fp.write('llvm_toolchain,%d\n' % rotating_max['llvm_toolchain'])
| 279 | |
| 280 | |
def GetBuilds():
  """Read build-data.txt to determine values for current report."""

  # Read the values of the last builds used to generate a report, and
  # increment them appropriately, to get values for generating the
  # current report. (See comments in UpdateBuilds.)
  with open(BUILD_DATA_FILE, 'r') as fp:
    lines = fp.readlines()

  builds = []
  for line in lines:
    fields = line.rstrip().split(',')
    builder = fields[0]
    last_build = int(fields[1])
    builds.append((builder, last_build + 1))
    # NOTE: We are assuming here that there are always 2 daily builds in
    # each of the rotating builders. I am not convinced this is a valid
    # assumption.
    if builder in ('gcc_toolchain', 'llvm_toolchain'):
      builds.append((builder, last_build + 2))

  return builds
| 304 | |
| 305 | |
def RecordFailures(failure_dict, platform, suite, builder, int_date, log_file,
                   build_num, failed):
  """Read and update the stored data about test failures.

  Args:
    failure_dict: top-level failure dictionary, keyed by suite name;
      updated in place.
    platform: board the suite ran on.
    suite: name of the test suite.
    builder: name of the builder that produced this run.
    int_date: date of the run as an integer (YYYYMMDD).
    log_file: path to the downloaded log for this test/build.
    failed: collection of names of the tests that failed.
    build_num: build number on the builder.
  """

  # Get the dictionary for this particular test suite from the failures
  # dictionary.
  suite_dict = failure_dict[suite]

  # Read in the entire log file for this test/build.
  with open(log_file, 'r') as in_file:
    lines = in_file.readlines()

  # Update the entries in the failure dictionary for each test within this
  # suite that failed.
  for test in failed:
    # Get the entry in the suite dictionary for this test, or create one.
    test_dict = suite_dict.get(test, {})
    # Parse the lines from the log file, looking for lines that indicate this
    # test failed.  If several matching lines exist, the last one wins.
    msg = ''
    for l in lines:
      words = l.split()
      if len(words) < 3:
        continue
      if ((words[0] == test and words[1] == 'ERROR:') or
          (words[0] == 'provision' and words[1] == 'FAIL:')):
        # Get the error message for the failure.
        msg = ' '.join(words[2:])
    if not msg:
      msg = 'Unknown_Error'

    # Get the list of occurrences for this error message, or create one.
    error_list = test_dict.get(msg, [])
    # Create an entry for this new failure.
    new_item = [int_date, platform, builder, build_num]
    # Add this failure to the error list if it's not already there.
    if new_item not in error_list:
      error_list.append(new_item)
    # Sort the error list by date.
    error_list.sort(key=lambda x: x[0])
    # Calculate the earliest date to save; delete records for older failures.
    # NOTE(review): subtracting a record count from a YYYYMMDD integer only
    # approximates 'N days ago' — confirm this is the intended cutoff.
    earliest_date = int_date - MAX_SAVE_RECORDS
    # The bounds check must come before the indexing; otherwise an
    # IndexError is raised when every record is older than earliest_date.
    i = 0
    while i < len(error_list) and error_list[i][0] <= earliest_date:
      i += 1
    if i > 0:
      error_list = error_list[i:]
    # Save the error list in the test's dictionary, keyed on error msg.
    test_dict[msg] = error_list

    # Save the updated test dictionary in the test_suite dictionary.
    suite_dict[test] = test_dict

  # Save the updated test_suite dictionary in the failure dictionary.
  failure_dict[suite] = suite_dict
| 370 | |
| 371 | |
def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,
                 build_num, build_link):
  """Parse the log file from the given builder, build_num and test.

  Also adds the results for this test to our test results dictionary,
  and calls RecordFailures, to update our test failure data.

  Args:
    log_file: path to the downloaded log file.
    test_data_dict: historical test results, updated in place.
    failure_dict: test failure history, updated in place.
    test: name of the test suite this log belongs to.
    builder: name of the builder.
    build_num: build number on the builder.
    build_link: URL of the build, stored with the results.

  Returns:
    Tuple (summary_result, date, board, int_date): summary_result is a
    '[pass/ fail/ not-run]' string, or [] if the log was a 404; date is
    the human-readable start date; board is the board name found in the
    log (may be '' if none was found); int_date is the start date as a
    YYYYMMDD integer, 0 when unknown.
  """

  with open(log_file, 'r') as infile:
    lines = infile.readlines()

  passed = {}
  failed = {}
  not_run = {}
  date = ''
  status = ''  # parsed for completeness; not currently used downstream
  board = ''
  num_provision_errors = 0
  build_ok = True
  afe_line = ''
  # Default to 0 so int_date is never unbound when a (valid) log happens to
  # lack a 'started:' line.
  int_date = 0

  for line in lines:
    if line.rstrip() == '<title>404 Not Found</title>':
      print('Warning: File for %s (build number %d), %s was not found.' %
            (builder, build_num, test))
      build_ok = False
      break
    if '[ PASSED ]' in line:
      test_name = line.split()[0]
      if test_name != 'Suite':
        passed[test_name] = True
    elif '[ FAILED ]' in line:
      test_name = line.split()[0]
      if test_name == 'provision':
        # Provision failures mean the tests never ran at all.
        num_provision_errors += 1
        not_run[test_name] = True
      elif test_name != 'Suite':
        failed[test_name] = True
    elif line.startswith('started: '):
      # e.g. 'started: Fri Nov 18 16:49:00 2016'
      date = line.rstrip()[9:]
      date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')
      int_date = (
          date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)
      date = time.strftime('%a %b %d %Y', date_obj)
    elif line.startswith('status: '):
      status = line.rstrip().split(':')[-1]
    elif line.startswith('@@@STEP_LINK@Link to suite@'):
      # Extract the AFE job URL from the annotator step link.
      afe_line = line.rstrip()
      for w in afe_line.split('@'):
        if w.startswith('http'):
          afe_line = w
          # The annotator output HTML-escapes '&' in URLs; undo that.
          afe_line = afe_line.replace('&amp;', '&')
    elif 'INFO: RunCommand:' in line:
      words = line.split()
      for i in range(0, len(words) - 1):
        if words[i] == '--board':
          board = words[i + 1]

  test_dict = test_data_dict[test]
  test_list = test_dict['tests']

  if not build_ok:
    # Log was missing (404); nothing to record for this build.
    return [], date, board, 0

  # Anything in the suite's expected test list that neither passed nor
  # failed did not run.
  for t in test_list:
    if t not in passed and t not in failed:
      not_run[t] = True

  total_pass = len(passed)
  total_fail = len(failed)
  total_notrun = len(not_run)

  build_dict = dict()
  build_dict['id'] = build_num
  build_dict['builder'] = builder
  build_dict['date'] = date
  build_dict['build_link'] = build_link
  build_dict['total_pass'] = total_pass
  build_dict['total_fail'] = total_fail
  build_dict['total_not_run'] = total_notrun
  build_dict['afe_job_link'] = afe_line
  build_dict['provision_errors'] = num_provision_errors

  # Use YYYYMMDD (integer) as the build record key.
  board_dict = test_dict.get(board, {})
  board_dict[int_date] = build_dict

  # Only keep the last MAX_SAVE_RECORDS records (based on date).
  keys_list = board_dict.keys()
  if len(keys_list) > MAX_SAVE_RECORDS:
    min_key = min(keys_list)
    del board_dict[min_key]

  # Make sure changes get back into the main dictionary.
  test_dict[board] = board_dict
  test_data_dict[test] = test_dict

  if failed:
    RecordFailures(failure_dict, board, test, builder, int_date, log_file,
                   build_num, failed)

  summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)

  return summary_result, date, board, int_date
| 491 | |
| 492 | |
def DownloadLogFile(builder, buildnum, test, test_family):
  """Download the stdio log for one test step of one build via sso_client.

  Returns a (local_log_path, build_url) tuple; both are empty strings when
  the download fails.
  """

  ce = command_executer.GetCommandExecuter()
  os.system('mkdir -p %s/%s/%s' % (DOWNLOAD_DIR, builder, test))

  # The rotating tryserver builders live on a different waterfall than the
  # regular chromeos builders; everything after the waterfall name is the
  # same for both.
  if builder in ('gcc_toolchain', 'llvm_toolchain'):
    waterfall = 'chromiumos.tryserver'
  else:
    waterfall = 'chromeos'
  build_link = ('https://uberchromegw.corp.google.com/i/%s/builders/%s'
                '/builds/%d' % (waterfall, builder, buildnum))
  source = ('%s/steps/%s%%20%%5B%s%%5D/logs/stdio' %
            (build_link, test_family, test))

  target = '%s/%s/%s/%d' % (DOWNLOAD_DIR, builder, test, buildnum)
  # Skip the download when we already have a non-empty copy of the log.
  if not os.path.isfile(target) or os.path.getsize(target) == 0:
    cmd = 'sso_client %s > %s' % (source, target)
    if ce.RunCommand(cmd) != 0:
      return '', ''

  return target, build_link
| 518 | |
| 519 | |
def Main():
  """Main function for this script."""

  # Load the persistent test-result and failure-history dictionaries.
  with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:
    test_data_dict = json.load(input_file)

  with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:
    failure_dict = json.load(fp)

  builds = GetBuilds()

  waterfall_report_dict = dict()
  rotating_report_dict = dict()
  int_date = 0
  for test, test_family in TESTS:
    for builder, buildnum in builds:
      # The llvm builders do not run the kernel suites, and the x86
      # builders only run the bvt suites.
      if test.startswith('kernel') and 'llvm' in builder:
        continue
      if 'x86' in builder and not test.startswith('bvt'):
        continue

      target, build_link = DownloadLogFile(builder, buildnum, test, test_family)
      if not os.path.exists(target):
        continue

      test_summary, report_date, board, tmp_date = ParseLogFile(
          target, test_data_dict, failure_dict, test, builder, buildnum,
          build_link)
      if tmp_date != 0:
        int_date = tmp_date

      # Rotating builders go into their own report.
      if builder in ROTATING_BUILDERS:
        report = rotating_report_dict
      else:
        report = waterfall_report_dict
      UpdateReport(report, builder, test, report_date, build_link,
                   test_summary, board)

  if waterfall_report_dict:
    GenerateWaterfallReport(waterfall_report_dict, failure_dict, 'main',
                            int_date)
  if rotating_report_dict:
    GenerateWaterfallReport(rotating_report_dict, failure_dict, 'rotating',
                            int_date)

  # Persist the updated dictionaries for the next run.
  with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:
    json.dump(test_data_dict, out_file, indent=2)

  with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:
    json.dump(failure_dict, out_file, indent=2)

  UpdateBuilds(builds)
| 575 | |
| 576 | |
# Run the report generator when invoked as a script.
if __name__ == '__main__':
  Main()
  sys.exit(0)