Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 1 | #!/usr/bin/env python2 |
| 2 | """Generate summary report for ChromeOS toolchain waterfalls.""" |
| 3 | |
| 4 | # Desired future features (to be added): |
| 5 | # - arguments to allow generating only the main waterfall report, |
| 6 | # or only the rotating builder reports, or only the failures |
| 7 | # report; or the waterfall reports without the failures report. |
| 8 | # - Better way of figuring out which dates/builds to generate |
| 9 | # reports for: probably an argument specifying a date or a date |
| 10 | # range, then use something like the new buildbot utils to |
| 11 | # query the build logs to find the right build numbers for the |
| 12 | # builders for the specified dates. |
| 13 | # - Store/get the json/data files in mobiletc-prebuild's x20 area. |
| 14 | # - Update data in json file to reflect, for each testsuite, which |
| 15 | # tests are not expected to run on which boards; update this |
| 16 | # script to use that data appropriately. |
| 17 | # - Make sure user's prodaccess is up-to-date before trying to use |
| 18 | # this script. |
| 19 | # - Add some nice formatting/highlighting to reports. |
| 20 | |
| 21 | from __future__ import print_function |
| 22 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 23 | import argparse |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 24 | import getpass |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 25 | import json |
| 26 | import os |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 27 | import shutil |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 28 | import sys |
| 29 | import time |
| 30 | |
| 31 | from cros_utils import command_executer |
| 32 | |
# All the test suites whose data we might want for the reports.
# Each entry is (suite_name, test_family); the family is the buildbot
# step-name prefix under which the suite's log appears.
TESTS = (
    ('bvt-inline', 'HWTest'),
    ('bvt-cq', 'HWTest'),
    ('toolchain-tests', 'HWTest'),
    ('security', 'HWTest'),
    ('kernel_daily_regression', 'HWTest'),
    ('kernel_daily_benchmarks', 'HWTest'),)

# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
# LISTED IN THE REPORT.
WATERFALL_BUILDERS = [
    'amd64-gcc-toolchain', 'arm-gcc-toolchain', 'arm64-gcc-toolchain',
    'x86-gcc-toolchain', 'amd64-llvm-toolchain', 'arm-llvm-toolchain',
    'arm64-llvm-toolchain', 'x86-llvm-toolchain', 'amd64-llvm-next-toolchain',
    'arm-llvm-next-toolchain', 'arm64-llvm-next-toolchain',
    'x86-llvm-next-toolchain'
]

# Where the report input/state data lives, and where finished reports
# are archived.
DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
ARCHIVE_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-reports/'
# Local scratch area for downloaded build logs.
DOWNLOAD_DIR = '/tmp/waterfall-logs'
# Retention window for failure records.  Dates are YYYYMMDD integers, so
# subtracting this value is only approximately "days" across month
# boundaries.
MAX_SAVE_RECORDS = 7
# State file recording the last build number reported for each builder.
BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR
# The rotating (tryserver) builders, as opposed to the main waterfall ones.
GCC_ROTATING_BUILDER = 'gcc_toolchain'
LLVM_ROTATING_BUILDER = 'llvm_next_toolchain'
ROTATING_BUILDERS = [GCC_ROTATING_BUILDER, LLVM_ROTATING_BUILDER]

# For int-to-string date conversion. Note, the index of the month in this
# list needs to correspond to the month's integer value. i.e. 'Sep' must
# be at MONTHS[9].
MONTHS = [
    '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
    'Nov', 'Dec'
]
| 68 | |
| 69 | |
def format_date(int_date):
  """Convert an integer date to a string date. YYYYMMDD -> YYYY-MMM-DD

  Args:
    int_date: a date encoded as a YYYYMMDD integer, or 0 meaning "today".

  Returns:
    A 'YYYY-MMM-DD' string such as '2016-Nov-18', or 'today' if int_date
    is 0.
  """

  if int_date == 0:
    return 'today'

  # Use divmod (floor division) so the digit extraction behaves identically
  # under Python 2 and Python 3; the previous plain '/' would yield floats
  # under Python 3 and break the MONTHS lookup.
  tmp_date, day = divmod(int_date, 100)
  year, month = divmod(tmp_date, 100)

  return '%d-%s-%d' % (year, MONTHS[month], day)
| 85 | |
| 86 | |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 87 | def EmailReport(report_file, report_type, date): |
| 88 | subject = '%s Waterfall Summary report, %s' % (report_type, date) |
| 89 | email_to = getpass.getuser() |
Rahul Chaudhry | 213f3c0 | 2016-12-06 10:47:05 -0800 | [diff] [blame] | 90 | sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr' |
| 91 | command = ('%s --to=%s@google.com --subject="%s" --body_file=%s' % |
| 92 | (sendgmr_path, email_to, subject, report_file)) |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 93 | command_executer.GetCommandExecuter().RunCommand(command) |
| 94 | |
| 95 | |
def PruneOldFailures(failure_dict, int_date):
  """Discard failure records older than the retention window, in place.

  failure_dict maps suite -> test -> error_msg -> list of records, where
  each record's first element is a YYYYMMDD integer date.  Records are kept
  sorted by date, so stale entries form a prefix of each list; that prefix
  is dropped, and any message/test entries left empty are removed.
  """
  cutoff = int_date - MAX_SAVE_RECORDS
  for suite_dict in failure_dict.values():
    dead_tests = []
    for test, test_dict in suite_dict.items():
      dead_msgs = []
      for msg, fails in test_dict.items():
        # Strip the leading run of records dated at or before the cutoff.
        remaining = fails
        while remaining and remaining[0][0] <= cutoff:
          remaining = remaining[1:]
        test_dict[msg] = remaining
        if not remaining:
          dead_msgs.append(msg)

      for msg in dead_msgs:
        del test_dict[msg]

      if not test_dict:
        dead_tests.append(test)

    for test in dead_tests:
      del suite_dict[test]
| 126 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 127 | def GenerateFailuresReport(fail_dict, date): |
| 128 | filename = 'waterfall_report.failures.%s.txt' % date |
| 129 | date_string = format_date(date) |
| 130 | with open(filename, 'w') as out_file: |
| 131 | # Write failure report section. |
| 132 | out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string) |
| 133 | |
| 134 | # We want to sort the errors and output them in order of the ones that occur |
| 135 | # most often. So we have to collect the data about all of them, then sort |
| 136 | # it. |
| 137 | error_groups = [] |
| 138 | for suite in fail_dict: |
| 139 | suite_dict = fail_dict[suite] |
| 140 | if suite_dict: |
| 141 | for test in suite_dict: |
| 142 | test_dict = suite_dict[test] |
| 143 | for err_msg in test_dict: |
| 144 | err_list = test_dict[err_msg] |
| 145 | sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True) |
| 146 | err_group = [len(sorted_list), suite, test, err_msg, sorted_list] |
| 147 | error_groups.append(err_group) |
| 148 | |
| 149 | # Sort the errors by the number of errors of each type. Then output them in |
| 150 | # order. |
| 151 | sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True) |
| 152 | for i in range(0, len(sorted_errors)): |
| 153 | err_group = sorted_errors[i] |
| 154 | suite = err_group[1] |
| 155 | test = err_group[2] |
| 156 | err_msg = err_group[3] |
| 157 | err_list = err_group[4] |
| 158 | out_file.write('Suite: %s\n' % suite) |
| 159 | out_file.write(' %s (%d failures)\n' % (test, len(err_list))) |
| 160 | out_file.write(' (%s)\n' % err_msg) |
| 161 | for i in range(0, len(err_list)): |
| 162 | err = err_list[i] |
| 163 | out_file.write(' %s, %s, %s\n' % (format_date(err[0]), err[1], |
| 164 | err[2])) |
| 165 | out_file.write('\n') |
| 166 | |
| 167 | print('Report generated in %s.' % filename) |
| 168 | return filename |
| 169 | |
| 170 | |
def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date,
                            omit_failures):
  """Write out the actual formatted report.

  Args:
    report_dict: builder -> per-build data (plus a 'date' key holding the
      list of dates covered), as built by UpdateReport.
    fail_dict: suite -> test -> error_msg -> failure records, used for the
      trailing failures section.
    waterfall_type: 'main' or 'rotating'; selects the builder list and is
      embedded in the report filename.
    date: date tag used in the report filename.
    omit_failures: if True, skip the failures section entirely.

  Returns:
    The name of the report file written.
  """

  filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)

  # Builders may start/finish on different days, so 'date' is a list;
  # join them into a comma-separated string for the header.
  date_string = ''
  date_list = report_dict['date']
  num_dates = len(date_list)
  i = 0
  for d in date_list:
    date_string += d
    if i < num_dates - 1:
      date_string += ', '
    i += 1

  # The main waterfall uses a fixed, ordered builder list; rotating
  # reports just list whatever builders appear in the data.
  if waterfall_type == 'main':
    report_list = WATERFALL_BUILDERS
  else:
    report_list = report_dict.keys()

  with open(filename, 'w') as out_file:
    # Write Report Header
    out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
                   (waterfall_type, date_string))
    out_file.write('                                                          '
                   '                kernel       kernel\n')
    out_file.write('             Build       bvt-         bvt-cq       '
                   'toolchain-   security     daily        daily\n')
    out_file.write('             status      inline       '
                   '             tests                     regression   benchmarks\n')
    out_file.write('            [P/ F/ DR]* [P/ F /DR]* '
                   '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n')

    # Write daily waterfall status section.
    for i in range(0, len(report_list)):
      builder = report_list[i]
      # 'date' is a bookkeeping key in report_dict, not a builder.
      if builder == 'date':
        continue

      if builder not in report_dict:
        out_file.write('Unable to find information for %s.\n\n' % builder)
        continue

      build_dict = report_dict[builder]
      status = build_dict.get('build_status', 'bad')
      inline = build_dict.get('bvt-inline', '[??/ ?? /??]')
      cq = build_dict.get('bvt-cq', '[??/ ?? /??]')
      inline_color = build_dict.get('bvt-inline-color', '')
      cq_color = build_dict.get('bvt-cq-color', '')
      # x86 builders only run the two bvt suites; the toolchain/security
      # suites run elsewhere, and the kernel dailies only on gcc builders.
      if 'x86' not in builder:
        toolchain = build_dict.get('toolchain-tests', '[??/ ?? /??]')
        security = build_dict.get('security', '[??/ ?? /??]')
        toolchain_color = build_dict.get('toolchain-tests-color', '')
        security_color = build_dict.get('security-color', '')
        if 'gcc' in builder:
          regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]')
          bench = build_dict.get('kernel_daily_benchmarks', '[??/ ?? /??]')
          regression_color = build_dict.get('kernel_daily_regression-color', '')
          bench_color = build_dict.get('kernel_daily_benchmarks-color', '')
          # Color annotations go on the line above the results they label.
          out_file.write('                  %6s      %6s'
                         '      %6s      %6s      %6s      %6s\n' %
                         (inline_color, cq_color, toolchain_color,
                          security_color, regression_color, bench_color))
          out_file.write('%25s %3s %s %s %s %s %s %s\n' % (builder, status,
                                                           inline, cq,
                                                           toolchain, security,
                                                           regression, bench))
        else:
          out_file.write('                  %6s      %6s'
                         '      %6s      %6s\n' % (inline_color, cq_color,
                                                   toolchain_color,
                                                   security_color))
          out_file.write('%25s %3s %s %s %s %s\n' % (builder, status, inline,
                                                     cq, toolchain, security))
      else:
        out_file.write('                  %6s      %6s\n' %
                       (inline_color, cq_color))
        out_file.write('%25s %3s %s %s\n' % (builder, status, inline, cq))
      if 'build_link' in build_dict:
        out_file.write('%s\n\n' % build_dict['build_link'])

    out_file.write('\n\n*P = Number of tests in suite that Passed; F = '
                   'Number of tests in suite that Failed; DR = Number of tests'
                   ' in suite that Didn\'t Run.\n')

    if omit_failures:
      print('Report generated in %s.' % filename)
      return filename

    # Write failure report section.
    out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string)

    # We want to sort the errors and output them in order of the ones that occur
    # most often. So we have to collect the data about all of them, then sort
    # it.
    error_groups = []
    for suite in fail_dict:
      suite_dict = fail_dict[suite]
      if suite_dict:
        for test in suite_dict:
          test_dict = suite_dict[test]
          for err_msg in test_dict:
            err_list = test_dict[err_msg]
            sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)
            err_group = [len(sorted_list), suite, test, err_msg, sorted_list]
            error_groups.append(err_group)

    # Sort the errors by the number of errors of each type. Then output them in
    # order.
    sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)
    for i in range(0, len(sorted_errors)):
      err_group = sorted_errors[i]
      suite = err_group[1]
      test = err_group[2]
      err_msg = err_group[3]
      err_list = err_group[4]
      out_file.write('Suite: %s\n' % suite)
      out_file.write(' %s (%d failures)\n' % (test, len(err_list)))
      out_file.write(' (%s)\n' % err_msg)
      for i in range(0, len(err_list)):
        err = err_list[i]
        out_file.write(' %s, %s, %s\n' % (format_date(err[0]), err[1],
                                          err[2]))
      out_file.write('\n')

  print('Report generated in %s.' % filename)
  return filename
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 299 | |
| 300 | |
def UpdateReport(report_dict, builder, test, report_date, build_link,
                 test_summary, board, color):
  """Update the data in our report dictionary with current test's data."""

  # It is possible that some of the builders started/finished on different
  # days, so we allow for multiple dates in the reports.
  dates = report_dict.setdefault('date', [])
  if report_date not in dates:
    dates.append(report_date)

  # Rotating builders are keyed per-board so each board gets its own row.
  if builder == GCC_ROTATING_BUILDER:
    build_key = '%s-gcc-toolchain' % board
  elif builder == LLVM_ROTATING_BUILDER:
    build_key = '%s-llvm-next-toolchain' % board
  else:
    build_key = builder

  build_dict = report_dict.get(build_key, dict())

  # Keep the first build link and date we saw for this build.
  build_dict.setdefault('build_link', build_link)
  build_dict.setdefault('date', report_date)

  if 'board' in build_dict and build_dict['board'] != board:
    raise RuntimeError('Error: Two different boards (%s,%s) in one build (%s)!'
                       % (board, build_dict['board'], build_link))
  build_dict['board'] = board

  build_dict['%s-color' % test] = color

  # Check to see if we already have a build status for this build_key.
  # Use it unless the current test indicates a bad build (below).
  prior_status = build_dict.get('build_status', '')

  if not test_summary:
    # Current test data was not available, so something was bad with build.
    build_dict['build_status'] = 'bad'
    build_dict[test] = '[  no data  ]'
  else:
    build_dict[test] = test_summary
    if not prior_status:
      # Current test ok; no other data, so assume build was ok.
      build_dict['build_status'] = 'ok'

  report_dict[build_key] = build_dict
| 356 | |
| 357 | |
def UpdateBuilds(builds):
  """Update the data in our build-data.txt file."""

  # The build data file records the last build number for which we
  # generated a report. When we generate the next report, we read
  # this data and increment it to get the new data; when we finish
  # generating the reports, we write the updated values into this file.
  # NOTE: One side effect of doing this at the end: If the script
  # fails in the middle of generating a report, this data does not get
  # updated.

  # Collapse the (possibly two) entries for each rotating builder down to
  # their highest build number; everything else passes through unchanged.
  gcc_max = 0
  llvm_max = 0
  regular_builds = []
  for name, num in builds:
    if name == GCC_ROTATING_BUILDER:
      gcc_max = max(gcc_max, num)
    elif name == LLVM_ROTATING_BUILDER:
      llvm_max = max(llvm_max, num)
    else:
      regular_builds.append((name, num))

  with open(BUILD_DATA_FILE, 'w') as fp:
    for name, num in regular_builds:
      fp.write('%s,%d\n' % (name, num))
    if gcc_max > 0:
      fp.write('%s,%d\n' % (GCC_ROTATING_BUILDER, gcc_max))
    if llvm_max > 0:
      fp.write('%s,%d\n' % (LLVM_ROTATING_BUILDER, llvm_max))
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 382 | |
| 383 | |
def GetBuilds():
  """Read build-data.txt to determine values for current report."""

  # The file holds the last build number already reported for each
  # builder; increment to get the builds for the current report.
  # (See comments in UpdateBuilds.)
  builds = []
  with open(BUILD_DATA_FILE, 'r') as fp:
    for line in fp:
      fields = line.rstrip().split(',')
      name = fields[0]
      last_num = int(fields[1])
      builds.append((name, last_num + 1))
      # NOTE: We are assuming here that there are always 2 daily builds in
      # each of the rotating builders. I am not convinced this is a valid
      # assumption.
      if name in ROTATING_BUILDERS:
        builds.append((name, last_num + 2))

  return builds
| 407 | |
| 408 | |
def RecordFailures(failure_dict, platform, suite, builder, int_date, log_file,
                   build_num, failed):
  """Read and update the stored data about test failures.

  Args:
    failure_dict: suite -> test -> error_msg -> list of failure records,
      updated in place.
    platform: board the suite ran on.
    suite: name of the test suite.
    builder: name of the builder that produced this run.
    int_date: date of the run as a YYYYMMDD integer.
    log_file: path to the downloaded log for this test/build.
    build_num: build number on the builder.
    failed: iterable of test names that failed in this run.
  """

  # Get the dictionary for this particular test suite from the failures
  # dictionary.
  suite_dict = failure_dict[suite]

  # Read in the entire log file for this test/build.
  with open(log_file, 'r') as in_file:
    lines = in_file.readlines()

  # Update the entries in the failure dictionary for each test within this
  # suite that failed.
  for test in failed:
    # Get (or create) the entry in the suite dictionary for this test.
    test_dict = suite_dict.setdefault(test, dict())

    # Parse the lines from the log file, looking for lines that indicate
    # this test failed.  If several lines match, the last one wins
    # (preserves the original behavior).
    msg = ''
    for l in lines:
      words = l.split()
      if len(words) < 3:
        continue
      if ((words[0] == test and words[1] == 'ERROR:') or
          (words[0] == 'provision' and words[1] == 'FAIL:')):
        # Everything after the marker is the error message.
        msg = ' '.join(words[2:])
    if not msg:
      msg = 'Unknown_Error'

    # Get (or create) the record list for this error message.
    error_list = test_dict.setdefault(msg, list())

    # Add this failure to the error list if it's not already there.
    # (Previously the record was constructed twice; append new_item itself.)
    new_item = [int_date, platform, builder, build_num]
    if new_item not in error_list:
      error_list.append(new_item)

    # Sort the error list by date.
    error_list.sort(key=lambda x: x[0])

    # Calculate the earliest date to save; delete records for older failures.
    earliest_date = int_date - MAX_SAVE_RECORDS
    i = 0
    while i < len(error_list) and error_list[i][0] <= earliest_date:
      i += 1
    if i > 0:
      error_list = error_list[i:]

    # Save the error list in the test's dictionary, keyed on error message.
    test_dict[msg] = error_list

    # Save the updated test dictionary in the test_suite dictionary.
    suite_dict[test] = test_dict

  # Save the updated test_suite dictionary in the failure dictionary.
  failure_dict[suite] = suite_dict
| 473 | |
| 474 | |
def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,
                 build_num, build_link):
  """Parse the log file from the given builder, build_num and test.

  Also adds the results for this test to our test results dictionary,
  and calls RecordFailures, to update our test failure data.

  Returns:
    A (summary_result, date, board, int_date, color) tuple; summary_result
    is '[]' (empty list) with int_date 0 when the log was a 404 page.
  """

  lines = []
  with open(log_file, 'r') as infile:
    lines = infile.readlines()

  passed = {}
  failed = {}
  not_run = {}
  date = ''
  status = ''
  board = ''
  num_provision_errors = 0
  build_ok = True
  afe_line = ''

  # Single pass over the log: collect pass/fail/not-run test names, the
  # start date, overall status, the AFE suite link, and the board name.
  for line in lines:
    # A 404 page means the log (and hence the build) was not found.
    if line.rstrip() == '<title>404 Not Found</title>':
      print('Warning: File for %s (build number %d), %s was not found.' %
            (builder, build_num, test))
      build_ok = False
      break
    if '[ PASSED ]' in line:
      test_name = line.split()[0]
      # 'Suite' lines summarize the whole suite, not an individual test.
      if test_name != 'Suite':
        passed[test_name] = True
    elif '[ FAILED ]' in line:
      test_name = line.split()[0]
      if test_name == 'provision':
        # Provision failures mean the DUT never ran the test.
        num_provision_errors += 1
        not_run[test_name] = True
      elif test_name != 'Suite':
        failed[test_name] = True
    elif line.startswith('started: '):
      # e.g. 'started: Fri Nov 18 16:49:00 2016'; derive both the display
      # date and the YYYYMMDD integer key.
      date = line.rstrip()
      date = date[9:]
      date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')
      int_date = (
          date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)
      date = time.strftime('%a %b %d %Y', date_obj)
    elif not status and line.startswith('status: '):
      # Only the first 'status:' line is used.
      status = line.rstrip()
      words = status.split(':')
      status = words[-1]
    elif line.find('Suite passed with a warning') != -1:
      status = 'WARNING'
    elif line.startswith('@@@STEP_LINK@Link to suite@'):
      # Extract the http URL embedded in the buildbot annotation.
      afe_line = line.rstrip()
      words = afe_line.split('@')
      for w in words:
        if w.startswith('http'):
          afe_line = w
          # NOTE(review): this replace is a no-op as written; it looks like
          # it was meant to be replace('&amp;', '&') and the first argument
          # lost its HTML escaping somewhere -- confirm against the
          # original source.
          afe_line = afe_line.replace('&', '&')
    elif 'INFO: RunCommand:' in line:
      # Pull the board name from the '--board <name>' command argument.
      words = line.split()
      for i in range(0, len(words) - 1):
        if words[i] == '--board':
          board = words[i + 1]

  test_dict = test_data_dict[test]
  test_list = test_dict['tests']

  if build_ok:
    # Any expected test that neither passed nor failed did not run.
    for t in test_list:
      if not t in passed and not t in failed:
        not_run[t] = True

    total_pass = len(passed)
    total_fail = len(failed)
    total_notrun = len(not_run)

  else:
    total_pass = 0
    total_fail = 0
    total_notrun = 0
    status = 'Not found.'
  if not build_ok:
    return [], date, board, 0, ' '

  # Record this build's results, keyed by board and date.
  build_dict = dict()
  build_dict['id'] = build_num
  build_dict['builder'] = builder
  build_dict['date'] = date
  build_dict['build_link'] = build_link
  build_dict['total_pass'] = total_pass
  build_dict['total_fail'] = total_fail
  build_dict['total_not_run'] = total_notrun
  build_dict['afe_job_link'] = afe_line
  build_dict['provision_errors'] = num_provision_errors
  # Map the buildbot status to the color tag used in the report.
  if status.strip() == 'SUCCESS':
    build_dict['color'] = 'green '
  elif status.strip() == 'FAILURE':
    build_dict['color'] = ' red '
  elif status.strip() == 'WARNING':
    build_dict['color'] = 'orange'
  else:
    build_dict['color'] = ' '

  # Use YYYYMMDD (integer) as the build record key.
  # NOTE(review): int_date is only assigned when a 'started: ' line was
  # found; if a well-formed log lacked one, the uses below would raise
  # UnboundLocalError -- confirm logs always contain it.
  if build_ok:
    if board in test_dict:
      board_dict = test_dict[board]
    else:
      board_dict = dict()
    board_dict[int_date] = build_dict

    # Only keep the last MAX_SAVE_RECORDS records (based on date).
    keys_list = board_dict.keys()
    if len(keys_list) > MAX_SAVE_RECORDS:
      min_key = min(keys_list)
      del board_dict[min_key]

    # Make sure changes get back into the main dictionary
    test_dict[board] = board_dict
    test_data_dict[test] = test_dict

  if len(failed) > 0:
    RecordFailures(failure_dict, board, test, builder, int_date, log_file,
                   build_num, failed)

  summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)

  return summary_result, date, board, int_date, build_dict['color']
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 604 | |
| 605 | |
def DownloadLogFile(builder, buildnum, test, test_family):
  """Download the stdio log for one test step of one build.

  Args:
    builder: name of the builder.
    buildnum: build number on that builder.
    test: name of the test suite whose log we want.
    test_family: buildbot step-name prefix (e.g. 'HWTest').

  Returns:
    A (local_log_path, build_link) tuple, or ('', '') if the download
    failed.
  """

  ce = command_executer.GetCommandExecuter()
  # Create the download directory directly instead of shelling out to
  # 'mkdir -p' (whose exit status was ignored anyway).
  target_dir = '%s/%s/%s' % (DOWNLOAD_DIR, builder, test)
  try:
    os.makedirs(target_dir)
  except OSError:
    pass  # Directory already exists -- same outcome as 'mkdir -p'.

  # Rotating builders live on the tryserver waterfall; the rest are on the
  # main chromeos waterfall.
  if builder in ROTATING_BUILDERS:
    source = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
              '/builders/%s/builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
                  '/builders/%s/builds/%d' % (builder, buildnum))
  else:
    source = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s/'
              'builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s'
                  '/builds/%d' % (builder, buildnum))

  # Only download if we do not already have a non-empty copy.
  target = '%s/%s/%s/%d' % (DOWNLOAD_DIR, builder, test, buildnum)
  if not os.path.isfile(target) or os.path.getsize(target) == 0:
    cmd = 'sso_client %s > %s' % (source, target)
    status = ce.RunCommand(cmd)
    if status != 0:
      return '', ''

  return target, build_link
| 631 | |
| 632 | |
Manoj Gupta | 6382452 | 2016-12-14 11:05:18 -0800 | [diff] [blame] | 633 | # Check for prodaccess. |
| 634 | def CheckProdAccess(): |
| 635 | status, output, _ = command_executer.GetCommandExecuter().RunCommandWOutput( |
| 636 | 'prodcertstatus') |
| 637 | if status != 0: |
| 638 | return False |
| 639 | # Verify that status is not expired |
| 640 | if 'expires' in output: |
| 641 | return True |
| 642 | return False |
| 643 | |
| 644 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 645 | def ValidOptions(parser, options): |
| 646 | too_many_options = False |
| 647 | if options.main: |
| 648 | if options.rotating or options.failures_report: |
| 649 | too_many_options = True |
| 650 | elif options.rotating and options.failures_report: |
| 651 | too_many_options = True |
| 652 | |
| 653 | if too_many_options: |
| 654 | parser.error('Can only specify one of --main, --rotating or' |
| 655 | ' --failures_report.') |
| 656 | |
| 657 | conflicting_failure_options = False |
| 658 | if options.failures_report and options.omit_failures: |
| 659 | conflicting_failure_options = True |
| 660 | parser.error('Cannot specify both --failures_report and --omit_failures.') |
| 661 | |
| 662 | return not too_many_options and not conflicting_failure_options |
| 663 | |
| 664 | |
def Main(argv):
  """Generate the waterfall/failure reports and update the data files.

  Downloads and parses the relevant build logs, assembles the per-builder
  report dictionaries, emails/archives the requested reports, and (unless
  --no_update is given) writes the updated json data files back out.

  Args:
    argv: Command-line arguments, excluding the program name.

  Returns:
    0 on success; 1 when the options are invalid or prodaccess is missing.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--main',
      dest='main',
      default=False,
      action='store_true',
      help='Generate report only for main waterfall '
      'builders.')
  parser.add_argument(
      '--rotating',
      dest='rotating',
      default=False,
      action='store_true',
      help='Generate report only for rotating builders.')
  parser.add_argument(
      '--failures_report',
      dest='failures_report',
      default=False,
      action='store_true',
      help='Only generate the failures section of the report.')
  parser.add_argument(
      '--omit_failures',
      dest='omit_failures',
      default=False,
      action='store_true',
      help='Do not generate the failures section of the report.')
  parser.add_argument(
      '--no_update',
      dest='no_update',
      default=False,
      action='store_true',
      help='Run reports, but do not update the data files.')

  options = parser.parse_args(argv)

  if not ValidOptions(parser, options):
    return 1

  main_only = options.main
  rotating_only = options.rotating
  failures_report = options.failures_report
  omit_failures = options.omit_failures

  test_data_dict = dict()
  failure_dict = dict()

  prod_access = CheckProdAccess()
  if not prod_access:
    print('ERROR: Please run prodaccess first.')
    # Bug fix: previously this was a bare 'return' (None), so the failure
    # looked like success to callers checking the return value.
    return 1

  with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:
    test_data_dict = json.load(input_file)

  with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:
    failure_dict = json.load(fp)

  builds = GetBuilds()

  waterfall_report_dict = dict()
  rotating_report_dict = dict()
  int_date = 0
  for test_desc in TESTS:
    test, test_family = test_desc
    for build in builds:
      (builder, buildnum) = build
      # Skip kernel test suites on llvm builders.
      if test.startswith('kernel') and 'llvm' in builder:
        continue
      # On x86 builders, only the bvt suites are collected.
      if 'x86' in builder and not test.startswith('bvt'):
        continue
      target, build_link = DownloadLogFile(builder, buildnum, test, test_family)

      if os.path.exists(target):
        test_summary, report_date, board, tmp_date, color = ParseLogFile(
            target, test_data_dict, failure_dict, test, builder, buildnum,
            build_link)

        # Remember the most recent nonzero date seen in any parsed log.
        if tmp_date != 0:
          int_date = tmp_date

        if builder in ROTATING_BUILDERS:
          UpdateReport(rotating_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)
        else:
          UpdateReport(waterfall_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)

  PruneOldFailures(failure_dict, int_date)

  if waterfall_report_dict and not rotating_only and not failures_report:
    main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,
                                          'main', int_date, omit_failures)
    EmailReport(main_report, 'Main', format_date(int_date))
    shutil.copy(main_report, ARCHIVE_DIR)
  if rotating_report_dict and not main_only and not failures_report:
    rotating_report = GenerateWaterfallReport(rotating_report_dict,
                                              failure_dict, 'rotating',
                                              int_date, omit_failures)
    EmailReport(rotating_report, 'Rotating', format_date(int_date))
    shutil.copy(rotating_report, ARCHIVE_DIR)

  if failures_report:
    # Use a distinct name for the generated file; the original rebound the
    # 'failures_report' boolean flag to the report path here, shadowing it.
    failures_report_file = GenerateFailuresReport(failure_dict, int_date)
    EmailReport(failures_report_file, 'Failures', format_date(int_date))
    shutil.copy(failures_report_file, ARCHIVE_DIR)

  if not options.no_update:
    with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(test_data_dict, out_file, indent=2)

    with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(failure_dict, out_file, indent=2)

    UpdateBuilds(builds)

  return 0
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 781 | |
| 782 | |
if __name__ == '__main__':
  # Propagate Main's return value as the process exit status.  The original
  # unconditionally called sys.exit(0), hiding failures from callers.
  sys.exit(Main(sys.argv[1:]))