Simran Basi | 833814b | 2013-01-29 13:13:43 -0800 | [diff] [blame] | 1 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. |
| 2 | # Use of this source code is governed by a BSD-style license that can be |
| 3 | # found in the LICENSE file. |
| 4 | |
| 5 | import logging |
| 6 | import os |
| 7 | import re |
| 8 | import StringIO |
| 9 | |
| 10 | import common |
| 11 | from autotest_lib.client.common_lib import error, utils |
| 12 | from autotest_lib.client.common_lib.cros import dev_server |
| 13 | |
| 14 | |
# Telemetry script locations, relative to the telemetry source checkout
# (joined with the devserver-provided telemetry path in TelemetryRunner).
TELEMETRY_RUN_BENCHMARKS_SCRIPT = 'tools/perf/run_multipage_benchmarks'
TELEMETRY_RUN_TESTS_SCRIPT = 'tools/telemetry/run_tests'
# Wall-clock timeout, in minutes, for a single telemetry invocation.
TELEMETRY_TIMEOUT_MINS = 60

# Result Statuses
SUCCESS_STATUS = 'SUCCESS'
WARNING_STATUS = 'WARNING'
FAILED_STATUS = 'FAILED'
| 23 | |
| 24 | |
class TelemetryResult(object):
    """Class to represent the results of a telemetry run.

    This class represents the results of a telemetry run, whether it ran
    successfully, failed or had warnings.
    """


    def __init__(self, exit_code=0, stdout='', stderr=''):
        """Initializes this TelemetryResult instance.

        @param exit_code: Exit code of the telemetry run. 0 maps to
                          SUCCESS_STATUS; any other value to FAILED_STATUS.
        @param stdout: Stdout of the telemetry run.
        @param stderr: Stderr of the telemetry run.
        """
        if exit_code == 0:
            self.status = SUCCESS_STATUS
        else:
            self.status = FAILED_STATUS

        # Perf key-value pairs; populated by parse_benchmark_results().
        self.perf_keyvals = {}
        self._stdout = stdout
        self._stderr = stderr
        self.output = '\n'.join([stdout, stderr])


    def _cleanup_value(self, value):
        """Cleanup a value string.

        Given a string representing a value, clean it up by removing the
        space and parenthesis around the units, and either append the units
        or get rid of them.

        Examples:
            loadtime (ms) -> loadtime_ms
            image_count () -> image_count
            image_count (count) -> image_count
            CodeLoad (score (bigger is better)) -> CodeLoad_score
            load_percent (%) -> load_percent
            score (runs/s) -> score_runs_per_s

        @param value: Value we are cleaning up.

        @result a String representing the cleaned up value.
        """
        value_sections = value.split(' (')
        value_name = value_sections[0]
        # Robustness fix: a value with no ' (units)' section used to raise
        # IndexError; return the bare name instead.
        if len(value_sections) < 2:
            return value_name
        # There can be sub-parens in the units -> if so remove them.
        units = value_sections[1].split('(')[0]
        units = units.split(')')[0]
        # Bug fix: was "units is '%'" -- an identity comparison that only
        # worked because CPython interns one-character strings.
        if units == '%':
            units = 'percent'
        if '/' in units:
            units = units.replace('/', '_per_')
        if not units:
            return value_name
        if value_name.endswith(units):
            return value_name
        return '_'.join([value_name, units])


    def parse_benchmark_results(self):
        """Parse the results of a telemetry benchmark run.

        Stdout has the format of CSV at the top and then the output repeated
        in RESULT block format below.

        We will parse the CSV part to get the perf key-value pairs we are
        interested in.

        Example stdout:
        url,average_commit_time (ms),average_image_gathering_time (ms)
        file:///tough_scrolling_cases/cust_scrollbar.html,1.3644,0
        RESULT average_commit_time: <URL>= <SCORE> score
        RESULT average_image_gathering_time: <URL>= <SCORE> score

        We want to generate perf keys in the format of value-url i.e.:
        average_commit_time-http____www.google.com
        where we also remove non-alphanumeric characters except '.', '_',
        and '-'.

        Stderr has the format of Warnings/Tracebacks. There is always a
        default warning about the display environment setting, followed by
        warnings of page timeouts or a traceback.

        If there are any other warnings we flag the test as warning. If
        there is a traceback we consider this test a failure.

        Updates self.perf_keyvals and self.status in place; returns nothing.
        """
        # The output will be in CSV format.
        if not self._stdout:
            # Nothing in stdout implies a test failure.
            logging.error('No stdout, test failed.')
            self.status = FAILED_STATUS
            return

        stdout_lines = self._stdout.splitlines()
        value_names = None
        for line in stdout_lines:
            if not line:
                continue
            if not value_names and line.startswith('url,'):
                # This line lists out all the values we care about and we
                # drop the first one as it is the url name.
                value_names = line.split(',')[1:]
                # Clean up each value name.
                value_names = [self._cleanup_value(v) for v in value_names]
                logging.debug('Value_names: %s', value_names)
                # Bug fix: skip the header itself -- without this, a header
                # whose value names carry no ' (units)' (and hence no space)
                # fell through and was parsed as a data row.
                continue
            if not value_names:
                continue
            if ' ' in line:
                # We are in a non-CSV part of the output, ignore this line.
                continue
            # We are now at a CSV line we care about, parse it accordingly.
            line_values = line.split(',')
            # Grab the URL.
            url = line_values[0]
            # We want the perf keys to be format value-url. Example:
            # load_time-http___www.google.com
            # And replace all non-alphanumeric characters except
            # '-', '.' and '_' with '_'.
            url_values_names = [re.sub(r'[^\w.-]', '_', '-'.join([v, url]))
                                for v in value_names]
            self.perf_keyvals.update(dict(zip(url_values_names,
                                              line_values[1:])))
        logging.debug('Perf Keyvals: %s', self.perf_keyvals)

        # Bug fix: compare status strings with == rather than 'is'.
        if self.status == SUCCESS_STATUS:
            return

        # Otherwise check if simply a Warning occurred or a Failure,
        # i.e. a Traceback is listed.
        self.status = WARNING_STATUS
        for line in self._stderr.splitlines():
            if line.startswith('Traceback'):
                self.status = FAILED_STATUS
| 168 | |
| 169 | |
class TelemetryRunner(object):
    """Class responsible for telemetry for a given build.

    This class will extract and install telemetry on the devserver and is
    responsible for executing the telemetry benchmarks and returning their
    output to the caller.
    """

    def __init__(self, host):
        """Initializes this telemetry runner instance.

        If telemetry is not installed for this build, it will be.

        @param host: Host object for the DUT; must provide get_build() and
                     a hostname attribute.

        @raises error.AutotestError: If no build can be determined for the
                                     host.
        """
        self._host = host
        logging.debug('Grabbing build from AFE.')

        build = host.get_build()
        if not build:
            logging.error('Unable to locate build label for host: %s.',
                          self._host.hostname)
            raise error.AutotestError('Failed to grab build for host %s.' %
                                      self._host.hostname)

        logging.debug('Setting up telemetry for build: %s', build)

        self._devserver = dev_server.ImageServer.resolve(build)
        self._telemetry_path = self._devserver.setup_telemetry(build=build)
        logging.debug('Telemetry Path: %s', self._telemetry_path)


    def _run_telemetry(self, script, test_or_benchmark):
        """Runs telemetry on a dut.

        @param script: Telemetry script we want to run. For example:
                       [path_to_telemetry_src]/src/tools/telemetry/run_tests
        @param test_or_benchmark: Name of the test or benchmark we want to
                                  run, with the page_set (if required) as
                                  part of the string.

        @returns A TelemetryResult Instance with the results of this
                 telemetry execution.
        """
        # Strip the scheme and port off the devserver URL to get a bare
        # hostname we can ssh to.
        devserver_hostname = self._devserver.url().split(
                'http://')[1].split(':')[0]
        telemetry_args = ['ssh',
                          devserver_hostname,
                          'python',
                          script,
                          '--browser=cros-chrome',
                          '--remote=%s' % self._host.hostname,
                          test_or_benchmark]

        logging.debug('Running Telemetry: %s', ' '.join(telemetry_args))
        output = StringIO.StringIO()
        error_output = StringIO.StringIO()
        exit_code = 0
        try:
            result = utils.run(' '.join(telemetry_args), stdout_tee=output,
                               stderr_tee=error_output,
                               timeout=TELEMETRY_TIMEOUT_MINS*60)
            exit_code = result.exit_status
        except error.CmdError as e:
            # Telemetry returned a return code of not 0; for benchmarks this
            # can be due to a timeout on one of the pages of the page set and
            # we may still have data on the rest. For a test however this
            # indicates failure.
            logging.debug('Error occurred executing telemetry.')
            exit_code = e.result_obj.exit_status

        stdout = output.getvalue()
        stderr = error_output.getvalue()
        logging.debug('Telemetry completed with exit code: %d.\nstdout:%s\n'
                      'stderr:%s', exit_code, stdout, stderr)

        return TelemetryResult(exit_code=exit_code, stdout=stdout,
                               stderr=stderr)


    def run_telemetry_test(self, test):
        """Runs a telemetry test on a dut.

        @param test: Telemetry test we want to run.

        @raises error.TestFail: If the test run failed.

        @returns A TelemetryResult Instance with the results of this
                 telemetry execution.
        """
        logging.debug('Running telemetry test: %s', test)
        telemetry_script = os.path.join(self._telemetry_path,
                                        TELEMETRY_RUN_TESTS_SCRIPT)
        result = self._run_telemetry(telemetry_script, test)
        # Bug fix: status strings are compared with ==, not 'is', and the
        # test name is now %-substituted into the message -- it was
        # previously passed as a second exception argument so the '%s'
        # placeholder was never filled in.
        if result.status == FAILED_STATUS:
            raise error.TestFail('Telemetry test: %s failed.' % test)
        return result


    def run_telemetry_benchmark(self, benchmark, page_set, keyval_writer=None):
        """Runs a telemetry benchmark on a dut.

        @param benchmark: Benchmark we want to run.
        @param page_set: Page set we want to use.
        @param keyval_writer: Should be a instance with the function
                              write_perf_keyval(), if None, no keyvals will
                              be written. Typically this will be the job
                              object from a autotest test.

        @raises error.TestWarn: If the benchmark exited with warnings.
        @raises error.TestFail: If the benchmark failed to run.

        @returns A TelemetryResult Instance with the results of this
                 telemetry execution.
        """
        logging.debug('Running telemetry benchmark: %s with page set: %s.',
                      benchmark, page_set)
        telemetry_script = os.path.join(self._telemetry_path,
                                        TELEMETRY_RUN_BENCHMARKS_SCRIPT)
        page_set_path = os.path.join(self._telemetry_path,
                                     'tools/perf/page_sets/%s' % page_set)
        benchmark_with_pageset = ' '.join([benchmark, page_set_path])
        result = self._run_telemetry(telemetry_script, benchmark_with_pageset)
        result.parse_benchmark_results()

        if keyval_writer:
            keyval_writer.write_perf_keyval(result.perf_keyvals)

        # Bug fix: compare status strings with == rather than 'is'.
        if result.status == WARNING_STATUS:
            raise error.TestWarn('Telemetry Benchmark: %s with page set: %s'
                                 ' exited with Warnings.' % (benchmark,
                                                             page_set))
        if result.status == FAILED_STATUS:
            raise error.TestFail('Telemetry Benchmark: %s with page set: %s'
                                 ' failed to run.' % (benchmark,
                                                      page_set))

        return result