#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
6"""Evaluate ChromeOS autotest.
7
8Note that by default 'test_that' will install dependency packages of autotest
9only once. For example, if you overwrote chrome's unittest binary, your new
Kuang-che Wu927231f2018-07-24 14:21:56 +080010binary will be persistent across autotest runs. Add --reinstall if you want
Kuang-che Wub9705bd2018-06-28 17:59:18 +080011clean autotest install.
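
Example invocation (illustrative; the script path, DUT address, and metric
values are placeholders):
  ./eval_cros_autotest.py 192.168.0.2 \
      --chromeos_root ~/chromiumos \
      --test_name video_VideoDecodeAccelerator.h264 \
      --metric some_metric --old_value 100 --new_value 200
The process exit code encodes the verdict (see EXIT_CODE_MAP).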
12"""
from __future__ import print_function
import argparse
import json
import logging
import os
import re
import subprocess
import sys

from bisect_kit import cli
from bisect_kit import common
from bisect_kit import configure
from bisect_kit import cros_util
from bisect_kit import util

logger = logging.getLogger(__name__)

OLD = 'old'
NEW = 'new'
SKIP = 'skip'
FATAL = 'fatal'

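# Process exit code for each verdict string returned by main(); see the
# sys.exit(EXIT_CODE_MAP[main()]) call at the bottom of this file.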
EXIT_CODE_MAP = {
    OLD: 0,
    NEW: 1,
    SKIP: 125,
    FATAL: 126,
}


def create_argument_parser():
  parser = argparse.ArgumentParser(description=__doc__)
  common.add_common_arguments(parser)
  parser.add_argument(
      'dut',
      nargs='?',
      type=cli.argtype_notempty,
      metavar='DUT',
      default=configure.get('DUT', ''))
  parser.add_argument(
      '--chromeos_root',
      type=cli.argtype_dir_path,
      metavar='CHROMEOS_ROOT',
      default=configure.get('CHROMEOS_ROOT', ''),
      help='ChromeOS tree root')
  parser.add_argument(
      '--test_name',
      required=True,
      help='Test name, like "video_VideoDecodeAccelerator.h264"')
  parser.add_argument(
      '--metric',
      help='Metric name of performance test; '
      'example: "cheets_SystemRawImageSize"')
  parser.add_argument(
      '--old_value',
      type=float,
      help='For performance test, old value of given metric')
  parser.add_argument(
      '--new_value',
      type=float,
      help='For performance test, new value of given metric')
  parser.add_argument(
      '--prebuilt',
      action='store_true',
      help='Run autotest using existing prebuilt package if specified; '
      'otherwise use the default one')
  parser.add_argument(
      '--reinstall',
      action='store_true',
      help='Remove existing autotest folder on the DUT first')
  parser.add_argument(
      '--args',
      help='Extra args passed to "test_that --args"; overrides the default')

  return parser


def parse_test_report_log(result_log, metric):
  """Parses the autotest result log.

  Args:
    result_log: content of test_report.log
    metric: which metric to capture, if not None

  Returns:
    (passed, values):
      passed: True if the test ran successfully
      values: captured metric values; None if the test failed or metric is None
  """
  m = re.search(r'Total PASS: (\d+)/(\d+)', result_log)
  if not m or m.group(1) != m.group(2):
    return False, None

  if not metric:
    return True, None

  values = []
  for line in result_log.splitlines():
    m = re.match(r'^(\S+)\s+(\w+)(?:\{\d+\})?\s+(\d+\.\d+)$', line)
    if not m:
      continue
    if m.group(2) == metric:
      values.append(float(m.group(3)))
  return True, values


def parse_test_result_chart(json_path, metric):
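  """Extracts values of the given metric from a results-chart.json file.

  Three layouts are handled, as shown by the lookups below (shapes are
  illustrative):
    1. telemetry:              {"charts": {<metric>: {"summary": ...}}}
    2. autotest without graph: {<metric>: {"summary": ...}}
    3. autotest with graph:    {<name>: {<subname>: ...}} for metric
       "<name>.<subname>"
  The summary dict carries either a "values" list or a single "value".

  Args:
    json_path: path of results-chart.json
    metric: metric name to extract

  Returns:
    list of metric values; empty if the metric is not found
  """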
  data = json.load(open(json_path))

  # format 1, telemetry
  if 'charts' in data:
    summary = data['charts'][metric]['summary']

  # format 2, autotest without graph
  elif metric in data:
    summary = data[metric]['summary']

  # format 3, autotest with graph
  elif metric.count('.') == 1:
    name, subname = metric.split('.')
    summary = data[name][subname]

  else:
    logger.error('metric "%s" not in %s', metric, json_path)
    return []

  if 'values' in summary:
    return summary['values']
  return [summary['value']]


def get_additional_test_args(test_name):
  """Gets extra arguments for a specific test.

  Some tests may require special arguments to run.

  Args:
    test_name: test name

  Returns:
    arguments (str)
  """
  if test_name.startswith('telemetry_'):
    return 'local=True'
  return ''


def run_test(opts):
  """Runs an autotest test.

  Args:
    opts: An argparse.Namespace holding command line arguments.

  Returns:
    path of the test result directory (outside chroot), or None if it cannot
    be determined
  """
  if opts.reinstall:
    util.check_call('ssh', opts.dut, 'rm', '-rf', '/usr/local/autotest')

  prebuilt_autotest_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
                                       cros_util.prebuilt_autotest_dir)
  # Set the results dir inside the source tree, so the results are easier to
  # access from outside the chroot.
  results_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
                             'tmp/autotest_results_tmp')
  if opts.prebuilt:
    test_that_bin = os.path.join(prebuilt_autotest_dir,
                                 'site_utils/test_that.py')
  else:
    test_that_bin = '/usr/bin/test_that'
  cmd = [test_that_bin, opts.dut, opts.test_name, '--results_dir', results_dir]
  if opts.prebuilt:
    cmd += ['--autotest_dir', prebuilt_autotest_dir]

  args = get_additional_test_args(opts.test_name)
  if opts.args:
    if args:
      logger.info(
          'default test_that args `%s` are overridden by '
          'command line option `%s`', args, opts.args)
    cmd += ['--args', opts.args]
  elif args:
    cmd += ['--args', args]

  output = cros_util.cros_sdk(opts.chromeos_root, *cmd)

  m = re.search(r'Finished running tests. Results can be found in (\S+)',
                output)
  if not m:
    logger.error('result dir is unknown')
    return None
  assert m.group(1) == results_dir
  return results_dir.replace(cros_util.chromeos_root_inside_chroot,
                             opts.chromeos_root)


def gather_test_result(opts, result_dir):
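  """Collects pass/fail status and metric values from a test result dir.

  Parses test_report.log first; if a metric was requested, the test passed,
  and no values were found in the log, falls back to scanning the result tree
  for results-chart.json files.

  Args:
    opts: An argparse.Namespace holding command line arguments.
    result_dir: path of the test result directory (outside chroot).

  Returns:
    (passed, values) as described in parse_test_report_log.
  """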
  result_log_path = os.path.join(result_dir, 'test_report.log')
  result_log = open(result_log_path).read()

  passed, values = parse_test_report_log(result_log, opts.metric)
  if opts.metric and passed and not values:
    values = []
    for root, _, files in os.walk(result_dir):
      for filename in files:
        if filename != 'results-chart.json':
          continue
        full_path = os.path.join(root, filename)
        values += parse_test_result_chart(full_path, opts.metric)

  return passed, values


def main(args=None):
  common.init()
  parser = create_argument_parser()
  opts = parser.parse_args(args)
  common.config_logging(opts)

  if not cros_util.is_dut(opts.dut):
    return FATAL

  # Verify command line options.
  if opts.metric:
    if opts.old_value is None:
      logger.error('--old_value is not provided')
      return FATAL
    if opts.new_value is None:
      logger.error('--new_value is not provided')
      return FATAL
  else:
    if opts.old_value is not None:
      logger.error('--old_value is provided but --metric is not')
      return FATAL
    if opts.new_value is not None:
      logger.error('--new_value is provided but --metric is not')
      return FATAL

  try:
    result_dir = run_test(opts)
  except subprocess.CalledProcessError:
    # TODO(kcwu): analyze the failure reason from the log and abort if it is a
    # real fatal case.
    if opts.metric:
      logger.info('failed before test start; SKIP')
      return SKIP
    else:
      logger.info('failed before test start; NEW')
      return NEW

  if result_dir is None:
    return FATAL

  passed, values = gather_test_result(opts, result_dir)

  if opts.metric:
    if not passed:
      logger.warning('test did not pass; SKIP')
      return SKIP
    if not values:
      logger.warning('no values found; SKIP')
      return SKIP

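    # Report the captured values and classify by whichever reference value
    # (old or new) the average is closer to.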
    print('BISECT_RESULT_VALUES=', ' '.join(map(str, values)))
    average = float(sum(values)) / len(values)
    if abs(average - opts.old_value) < abs(average - opts.new_value):
      logger.info('values=%s, average=%s; OLD', values, average)
      return OLD
    logger.info('values=%s, average=%s; NEW', values, average)
    return NEW
  else:
    if passed:
      logger.info('passed')
      return OLD
    logger.info('failed')
    return NEW


if __name__ == '__main__':
  sys.exit(EXIT_CODE_MAP[main()])