#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Evaluate ChromeOS autotest.

Note that by default 'test_that' installs autotest dependency packages only
once. For example, if you overwrite chrome's unittest binary, your new binary
persists across autotest runs. Pass --reinstall if you want a clean autotest
install.
"""
from __future__ import print_function
import argparse
import json
import logging
import os
import re
import subprocess
import sys

from bisect_kit import cli
from bisect_kit import common
from bisect_kit import configure
from bisect_kit import cros_util
from bisect_kit import util

logger = logging.getLogger(__name__)

OLD = 'old'
NEW = 'new'
SKIP = 'skip'
FATAL = 'fatal'

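# Exit codes reported to the bisector; 0, 1, and 125 match the
# `git bisect run` convention (old/good, new/bad, skip), and 126 is used
# here for fatal errors that should abort the bisection.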
EXIT_CODE_MAP = {
    OLD: 0,
    NEW: 1,
    SKIP: 125,
    FATAL: 126,
}


def create_argument_parser():
  parser = argparse.ArgumentParser(description=__doc__)
  common.add_common_arguments(parser)
  parser.add_argument(
      'dut',
      nargs='?',
      type=cli.argtype_notempty,
      metavar='DUT',
      default=configure.get('DUT', ''),
      help='Address of the device under test')
  parser.add_argument(
      '--chromeos_root',
      type=cli.argtype_dir_path,
      metavar='CHROMEOS_ROOT',
      default=configure.get('CHROMEOS_ROOT', ''),
      help='ChromeOS tree root')
  parser.add_argument(
      '--test_name',
      required=True,
      help='Test name, like "video_VideoDecodeAccelerator.h264"')
  parser.add_argument(
      '--metric',
      help=
      'Metric name of performance test; example: "cheets_SystemRawImageSize"')
  parser.add_argument(
      '--old_value',
      type=float,
      help='For performance tests, the old value of the given metric')
  parser.add_argument(
      '--new_value',
      type=float,
      help='For performance tests, the new value of the given metric')
  parser.add_argument(
      '--prebuilt',
      action='store_true',
      help='Run autotest using the existing prebuilt package if specified; '
      'otherwise use the default one')
  parser.add_argument(
      '--reinstall',
      action='store_true',
      help='Remove the existing autotest folder on the DUT first')
  parser.add_argument(
      '--args',
      help='Extra args passed to "test_that --args"; overrides the default')

  return parser


def parse_test_report_log(result_log, metric):
  """Parses autotest result log.

  Args:
    result_log: content of test_report.log
    metric: which metric to capture; None to skip metric capturing

  Returns:
    passed, values:
      passed: True if the test ran successfully
      values: captured metric values; None if the test failed or metric is
          None
  """
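  # test_report.log contains a summary line like 'Total PASS: 12/34'; treat
  # the run as passed only if every case passed.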
  m = re.search(r'Total PASS: (\d+)/(\d+)', result_log)
  if not m or m.group(1) != m.group(2):
    return False, None

  if not metric:
    return True, None

  values = []
  for line in result_log.splitlines():
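    # Match perf keyval lines. An assumed example of the format:
    #   platform_Foo/platform_Foo  cheets_SystemRawImageSize{1}  123.45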
    m = re.match(r'^(\S+)\s+(\w+)(?:\{\d+\})?\s+(\d+\.\d+)$', line)
    if not m:
      continue
    if m.group(2) == metric:
      values.append(float(m.group(3)))
  return True, values


def parse_test_result_chart(json_path, metric):
  """Parses metric values from a results-chart.json file.

  Args:
    json_path: path of results-chart.json
    metric: metric name

  Returns:
    list of metric values; None if the metric is not found
  """
  with open(json_path) as f:
    data = json.load(f)
  if metric not in data:
    logger.error('metric "%s" not in %s', metric, json_path)
    return None

  summary = data[metric]['summary']
  if 'values' in summary:
    return summary['values']
  return [summary['value']]


def get_additional_test_args(test_name):
  """Gets extra arguments for a specific test.

  Some tests require special arguments to run.

  Args:
    test_name: test name

  Returns:
    arguments (str)
  """
  if test_name.startswith('telemetry_'):
    # Run telemetry tests against the local copy of telemetry.
    return 'local=True'
  return ''


def run_test(opts):
  """Runs an autotest test.

  Args:
    opts: An argparse.Namespace to hold command line arguments.

  Returns:
    path of the test result directory (outside the chroot); None if the
    result directory cannot be determined
  """
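  # With --reinstall, wipe the autotest folder on the DUT so test_that
  # performs a fresh install (see the module docstring).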
  if opts.reinstall:
    util.check_call('ssh', opts.dut, 'rm', '-rf', '/usr/local/autotest')

  prebuilt_autotest_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
                                       cros_util.prebuilt_autotest_dir)
  # Set results dir inside the source tree, so the results are easy to access
  # outside the chroot.
  results_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
                             'tmp/autotest_results_tmp')
  if opts.prebuilt:
    test_that_bin = os.path.join(prebuilt_autotest_dir,
                                 'site_utils/test_that.py')
  else:
    test_that_bin = '/usr/bin/test_that'
  cmd = [test_that_bin, opts.dut, opts.test_name, '--results_dir', results_dir]
  if opts.prebuilt:
    cmd += ['--autotest_dir', prebuilt_autotest_dir]

  args = get_additional_test_args(opts.test_name)
  if opts.args:
    if args:
      logger.info('default test_that args `%s` are overridden by '
                  'command line option `%s`', args, opts.args)
    cmd += ['--args', opts.args]
  elif args:
    cmd += ['--args', args]

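  # test_that must run inside the chroot; cros_sdk enters the chroot, runs
  # the command there, and returns its output.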
  output = cros_util.cros_sdk(opts.chromeos_root, *cmd)

  m = re.search(r'Finished running tests. Results can be found in (\S+)',
                output)
  if not m:
    logger.error('unable to determine the result dir from test_that output')
    return None
  assert m.group(1) == results_dir
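  # Translate the path inside the chroot back to its location outside the
  # chroot.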
  return results_dir.replace(cros_util.chromeos_root_inside_chroot,
                             opts.chromeos_root)


def gather_test_result(opts, result_dir):
  """Gathers pass/fail status and metric values of a test run.

  Args:
    opts: An argparse.Namespace to hold command line arguments.
    result_dir: path of the test result directory (outside the chroot)

  Returns:
    passed, values: whether the test passed, and the captured metric values
  """
  result_log_path = os.path.join(result_dir, 'test_report.log')
  with open(result_log_path) as f:
    result_log = f.read()

  passed, values = parse_test_report_log(result_log, opts.metric)
  if passed and not values:
    # The metric values are not in test_report.log; fall back to
    # results-chart.json files.
    values = []
    for root, _, files in os.walk(result_dir):
      for filename in files:
        if filename != 'results-chart.json':
          continue
        full_path = os.path.join(root, filename)
        chart_values = parse_test_result_chart(full_path, opts.metric)
        if chart_values:
          values += chart_values

  return passed, values


def main(args=None):
  common.init()
  parser = create_argument_parser()
  opts = parser.parse_args(args)
  common.config_logging(opts)

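  # Bail out early if the target does not look like a reachable DUT.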
  if not cros_util.is_dut(opts.dut):
    return FATAL

  # Verify command line options.
  if opts.metric:
    if opts.old_value is None:
      logger.error('--old_value is not provided')
      return FATAL
    if opts.new_value is None:
      logger.error('--new_value is not provided')
      return FATAL
  else:
    if opts.old_value is not None:
      logger.error('--old_value is provided but --metric is not')
      return FATAL
    if opts.new_value is not None:
      logger.error('--new_value is provided but --metric is not')
      return FATAL

  try:
    result_dir = run_test(opts)
  except subprocess.CalledProcessError:
    logger.info('failed before the test started; FATAL')
    return FATAL
  if result_dir is None:
    # run_test could not determine where the results went.
    return FATAL

  passed, values = gather_test_result(opts, result_dir)

  if opts.metric:
    if not passed:
      logger.warning('test did not pass; SKIP')
      return SKIP
    if not values:
      logger.warning('no values found; SKIP')
      return SKIP

    print('BISECT_RESULT_VALUES=', ' '.join(map(str, values)))
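    # Classify by which side of the old/new midpoint the average lands on
    # (note this effectively assumes old_value < new_value).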
    average = float(sum(values)) / len(values)
    if average < (opts.old_value + opts.new_value) / 2:
      logger.info('values=%s, average=%s; OLD', values, average)
      return OLD
    logger.info('values=%s, average=%s; NEW', values, average)
    return NEW
  else:
    if passed:
      logger.info('passed')
      return OLD
    logger.info('failed')
    return NEW


if __name__ == '__main__':
  sys.exit(EXIT_CODE_MAP[main()])