maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1 | #!/usr/bin/env python |
Marc-Antoine Ruel | 8add124 | 2013-11-05 17:28:27 -0500 | [diff] [blame] | 2 | # Copyright 2013 The Swarming Authors. All rights reserved. |
Marc-Antoine Ruel | e98b112 | 2013-11-05 20:27:57 -0500 | [diff] [blame] | 3 | # Use of this source code is governed under the Apache License, Version 2.0 that |
| 4 | # can be found in the LICENSE file. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 5 | |
| 6 | """Client tool to trigger tasks or retrieve results from a Swarming server.""" |
| 7 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 8 | __version__ = '0.6.3' |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 9 | |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 10 | import collections |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 11 | import datetime |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 12 | import json |
| 13 | import logging |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 14 | import optparse |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 15 | import os |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 16 | import re |
| 17 | import shutil |
| 18 | import StringIO |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 19 | import subprocess |
| 20 | import sys |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 21 | import threading |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 22 | import time |
| 23 | import urllib |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 24 | import urlparse |
| 25 | import zipfile |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 26 | |
| 27 | from third_party import colorama |
| 28 | from third_party.depot_tools import fix_encoding |
| 29 | from third_party.depot_tools import subcommand |
vadimsh@chromium.org | 6b70621 | 2013-08-28 15:03:46 +0000 | [diff] [blame] | 30 | |
Marc-Antoine Ruel | 8806e62 | 2014-02-12 14:15:53 -0500 | [diff] [blame] | 31 | from utils import file_path |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 32 | from utils import logging_utils |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 33 | from third_party.chromium import natsort |
vadimsh@chromium.org | 6b70621 | 2013-08-28 15:03:46 +0000 | [diff] [blame] | 34 | from utils import net |
Marc-Antoine Ruel | cfb6085 | 2014-07-02 15:22:00 -0400 | [diff] [blame] | 35 | from utils import on_error |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 36 | from utils import threading_utils |
vadimsh@chromium.org | 6b70621 | 2013-08-28 15:03:46 +0000 | [diff] [blame] | 37 | from utils import tools |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 38 | from utils import zip_package |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 39 | |
Vadim Shtayura | e34e13a | 2014-02-02 11:23:26 -0800 | [diff] [blame] | 40 | import auth |
Marc-Antoine Ruel | 8bee66d | 2014-08-28 19:02:07 -0400 | [diff] [blame] | 41 | import isolated_format |
maruel@chromium.org | 7b844a6 | 2013-09-17 13:04:59 +0000 | [diff] [blame] | 42 | import isolateserver |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 43 | import run_isolated |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 44 | |
| 45 | |
# Absolute path of the directory containing this script.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 47 | |
| 48 | |
class Failure(Exception):
  """Generic failure.

  Raised for errors in this module that have no more specific exception type.
  """
| 52 | |
| 53 | |
| 54 | ### Isolated file handling. |
| 55 | |
| 56 | |
def isolated_upload_zip_bundle(isolate_server, bundle):
  """Uploads a zip package to Isolate Server and returns raw fetch URL.

  Args:
    isolate_server: URL of an Isolate Server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from.
  """
  # The swarming bot grabs this file with a plain HTTPS GET, so upload it into
  # the 'default' namespace: data stored there is returned uncompressed, and
  # the swarming_bot doesn't understand compressed data. This namespace has
  # nothing to do with the |namespace| passed to run_isolated.py, which is
  # used to store the files of the isolated task itself.
  logging.info('Zipping up and uploading files...')
  started = time.time()
  item = isolateserver.BufferItem(bundle.zip_into_buffer())
  with isolateserver.get_storage(isolate_server, 'default') as storage:
    pushed = storage.upload_items([item])
    url = storage.get_fetch_url(item)
  duration = time.time() - started
  if item not in pushed:
    # The server already had an identical zip; nothing was transferred.
    logging.info('Zip file already on server, time elapsed: %f', duration)
  else:
    logging.info('Upload complete, time elapsed: %f', duration)
  return url
| 84 | |
| 85 | |
def isolated_get_data(isolate_server):
  """Returns the 'data' section with all files necessary to bootstrap a task
  execution running an isolated task.

  It's mainly zipping run_isolated.zip over and over again.
  TODO(maruel): Get rid of this with.
  https://code.google.com/p/swarming/issues/detail?id=173
  """
  bundle = zip_package.ZipPackage(ROOT_DIR)
  # Nest an uncompressed run_isolated.zip inside the bundle; the bot runs it
  # directly (see isolated_get_run_commands).
  nested = run_isolated.get_as_zip_package().zip_into_buffer(compress=False)
  bundle.add_buffer('run_isolated.zip', nested)
  url = isolated_upload_zip_bundle(isolate_server, bundle)
  return [(url, 'swarm_data.zip')]
| 100 | |
| 101 | |
def isolated_get_run_commands(
    isolate_server, namespace, isolated_hash, extra_args, verbose):
  """Returns the 'commands' to run an isolated task via run_isolated.zip.

  Returns:
    commands list to be added to the request.
  """
  cmd = [
    'python', 'run_isolated.zip',
    '--isolated', isolated_hash,
    '--isolate-server', isolate_server,
    '--namespace', namespace,
  ]
  if verbose:
    cmd += ['--verbose']
  # Everything after '--' is forwarded by run_isolated.py to the command it
  # spawns.
  if extra_args:
    cmd += ['--'] + list(extra_args)
  return cmd
| 122 | |
| 123 | |
def isolated_archive(isolate_server, namespace, isolated, algo, verbose):
  """Archives a .isolated and all the dependencies on the Isolate Server.

  Shells out to isolate.py in the same directory as this script.

  Args:
    isolate_server: URL of the Isolate Server to upload to.
    namespace: namespace to upload the files into.
    isolated: path to the .isolated file to archive.
    algo: hashing algorithm, used to hash the .isolated file afterward.
    verbose: verbosity level; forwarded as that many repeated --verbose flags.

  Returns:
    Hash of the archived .isolated file, or None on archival failure.
  """
  logging.info(
      'isolated_archive(%s, %s, %s)', isolate_server, namespace, isolated)
  print('Archiving: %s' % isolated)
  cmd = [
    sys.executable,
    os.path.join(ROOT_DIR, 'isolate.py'),
    'archive',
    '--isolate-server', isolate_server,
    '--namespace', namespace,
    '--isolated', isolated,
  ]
  cmd.extend(['--verbose'] * verbose)
  logging.info(' '.join(cmd))
  # Bug fix: |verbose| was previously passed as subprocess.call()'s second
  # positional argument, which is bufsize, not a verbosity switch.
  if subprocess.call(cmd):
    return None
  return isolated_format.hash_file(isolated, algo)
| 142 | |
| 143 | |
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
  """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file
  (True) or a hash (False).
  """
  if not arg.endswith('.isolated'):
    # Not a file path, so it must already be a hash to be usable.
    if isolated_format.is_valid_hash(arg, algo):
      return arg, False
    on_error.report('Invalid hash %s' % arg)
    return None, False
  # It's a .isolated file; archive it first and trigger with its hash.
  file_hash = isolated_archive(isolate_server, namespace, arg, algo, verbose)
  if file_hash:
    return file_hash, True
  on_error.report('Archival failure %s' % arg)
  return None, True
| 161 | |
| 162 | |
def isolated_handle_options(options, args):
  """Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.

  Mutates |options| in place: fills in options.isolated (when a .isolated file
  or hash was given positionally) and options.task_name (when not provided).

  Returns:
    tuple(command, data).

  Raises:
    ValueError: on invalid arguments or when uploading the zip bundle fails.
  """
  isolated_cmd_args = []
  if not options.isolated:
    # No --isolated flag: the first positional argument is the .isolated file
    # or hash, everything after '--' goes to the task command.
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      args = args[:index]
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args[1:]
      args = args[:1]
    if len(args) != 1:
      raise ValueError(
          'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
          'process.')
    # Old code. To be removed eventually.
    options.isolated, is_file = isolated_to_hash(
        options.isolate_server, options.namespace, args[0],
        isolated_format.get_hash_algo(options.namespace), options.verbose)
    if not options.isolated:
      raise ValueError('Invalid argument %s' % args[0])
  elif args:
    # --isolated was given; remaining args (after '--' if present) are the
    # extra arguments for the task command.
    is_file = False
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      if index != 0:
        raise ValueError('Unexpected arguments.')
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args
  # NOTE(review): when --isolated is passed with no positional args and
  # --task-name is unset, is_file below is unbound (NameError) — presumably
  # callers always pass --task-name in that case; confirm.

  command = isolated_get_run_commands(
      options.isolate_server, options.namespace, options.isolated,
      isolated_cmd_args, options.verbose)

  # If a file name was passed, use its base name of the isolated hash.
  # Otherwise, use user name as an approximation of a task name.
  if not options.task_name:
    if is_file:
      key = os.path.splitext(os.path.basename(args[0]))[0]
    else:
      key = options.user
    options.task_name = u'%s/%s/%s' % (
        key,
        '_'.join(
          '%s=%s' % (k, v)
          for k, v in sorted(options.dimensions.iteritems())),
        options.isolated)

  try:
    data = isolated_get_data(options.isolate_server)
  except (IOError, OSError):
    on_error.report('Failed to upload the zip file')
    raise ValueError('Failed to upload the zip file')

  return command, data
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 225 | |
| 226 | |
| 227 | ### Triggering. |
| 228 | |
| 229 | |
# Describes one task to trigger. Converted to the json dict expected by the
# Swarming server by task_request_to_raw_request().
TaskRequest = collections.namedtuple(
    'TaskRequest',
    [
      'command',
      'data',
      'dimensions',
      'env',
      'expiration',
      'hard_timeout',
      'idempotent',
      'io_timeout',
      'name',
      'priority',
      'tags',
      'user',
      'verbose',
    ])
| 247 | |
| 248 | |
def task_request_to_raw_request(task_request):
  """Returns the json dict expected by the Swarming server for new request.

  This is for the v1 client Swarming API.
  """
  properties = {
    'commands': [task_request.command],
    'data': task_request.data,
    'dimensions': task_request.dimensions,
    'env': task_request.env,
    'execution_timeout_secs': task_request.hard_timeout,
    'io_timeout_secs': task_request.io_timeout,
    'idempotent': task_request.idempotent,
  }
  return {
    'name': task_request.name,
    # Links this task to the swarming task that triggered it, if any.
    'parent_task_id': os.environ.get('SWARMING_TASK_ID', ''),
    'priority': task_request.priority,
    'properties': properties,
    'scheduling_expiration_secs': task_request.expiration,
    'tags': task_request.tags,
    'user': task_request.user,
  }
| 271 | |
| 272 | |
def swarming_handshake(swarming):
  """Initiates the connection to the Swarming server.

  Returns:
    XSRF token to use on subsequent requests, or None on failure.
  """
  response = net.url_read_json(
      swarming + '/swarming/api/v1/client/handshake',
      headers={'X-XSRF-Token-Request': '1'},
      data={})
  if not response:
    logging.error('Failed to handshake with server')
    return None
  logging.info('Connected to server version: %s', response['server_version'])
  return response['xsrf_token']
| 285 | |
| 286 | |
def swarming_trigger(swarming, raw_request, xsrf_token):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info('Triggering: %s', raw_request['name'])
  result = net.url_read_json(
      swarming + '/swarming/api/v1/client/request',
      data=raw_request,
      headers={'X-XSRF-Token': xsrf_token})
  if result:
    return result
  on_error.report('Failed to trigger task %s' % raw_request['name'])
  return None
| 312 | |
| 313 | |
def setup_googletest(env, shards, index):
  """Sets googletest specific environment variables."""
  if shards <= 1:
    # Single shard: no sharding variables needed, return the dict untouched.
    return env
  # Copy so the caller's dict is not mutated.
  out = env.copy()
  out['GTEST_SHARD_INDEX'] = str(index)
  out['GTEST_TOTAL_SHARDS'] = str(shards)
  return out
| 321 | |
| 322 | |
def trigger_task_shards(swarming, task_request, shards):
  """Triggers one or many subtasks of a sharded task.

  Args:
    swarming: URL of the Swarming server.
    task_request: TaskRequest describing the task to trigger.
    shards: number of shards; when > 1, each shard gets googletest sharding
        env vars and a ':<index>:<shards>' suffix on its name.

  Returns:
    Dict with task details, returned to caller as part of --dump-json output.
    None in case of failure.
  """
  def convert(index):
    # Derive the per-shard request; the base request is reused as-is when
    # there is only one shard.
    req = task_request
    if shards > 1:
      req = req._replace(
          env=setup_googletest(req.env, shards, index),
          name='%s:%s:%s' % (req.name, index, shards))
    return task_request_to_raw_request(req)

  requests = [convert(index) for index in xrange(shards)]
  # One XSRF token is reused for all shard trigger requests.
  xsrf_token = swarming_handshake(swarming)
  if not xsrf_token:
    return None
  tasks = {}
  priority_warning = False
  for index, request in enumerate(requests):
    task = swarming_trigger(swarming, request, xsrf_token)
    if not task:
      break
    logging.info('Request result: %s', task)
    # Warn (once) if the server assigned a different priority than requested.
    if (not priority_warning and
        task['request']['priority'] != task_request.priority):
      priority_warning = True
      print >> sys.stderr, (
          'Priority was reset to %s' % task['request']['priority'])
    tasks[request['name']] = {
      'shard_index': index,
      'task_id': task['task_id'],
      'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
    }

  # Some shards weren't triggered. Abort everything.
  if len(tasks) != len(requests):
    if tasks:
      print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
          len(tasks), len(requests))
    for task_dict in tasks.itervalues():
      abort_task(swarming, task_dict['task_id'])
    return None

  return tasks
| 370 | |
| 371 | |
| 372 | ### Collection. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 373 | |
| 374 | |
# How often to print status updates to stdout in 'collect', in seconds.
STATUS_UPDATE_INTERVAL = 15 * 60.
| 377 | |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 378 | |
class State(object):
  """States in which a task can be.

  WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
  values are part of the API so if they change, the API changed.

  It's in fact an enum. Values should be in decreasing order of importance.
  """
  RUNNING = 0x10
  PENDING = 0x20
  EXPIRED = 0x30
  TIMED_OUT = 0x40
  BOT_DIED = 0x50
  CANCELED = 0x60
  COMPLETED = 0x70

  # Convenience groupings for classifying a task's state.
  STATES = (RUNNING, PENDING, EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
  STATES_RUNNING = (RUNNING, PENDING)
  STATES_NOT_RUNNING = (EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
  STATES_DONE = (TIMED_OUT, COMPLETED)
  STATES_ABANDONED = (EXPIRED, BOT_DIED, CANCELED)

  # Human readable name for each state.
  _NAMES = {
    RUNNING: 'Running',
    PENDING: 'Pending',
    EXPIRED: 'Expired',
    TIMED_OUT: 'Execution timed out',
    BOT_DIED: 'Bot died',
    CANCELED: 'User canceled',
    COMPLETED: 'Completed',
  }

  @classmethod
  def to_string(cls, state):
    """Returns a user-readable string representing a State."""
    try:
      return cls._NAMES[state]
    except KeyError:
      raise ValueError('Invalid state %s' % state)
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 417 | |
| 418 | |
class TaskOutputCollector(object):
  """Assembles task execution summary (for --task-summary-json output).

  Optionally fetches task outputs from isolate server to local disk (used when
  --task-output-dir is passed).

  This object is shared among multiple threads running 'retrieve_results'
  function, in particular they call 'process_shard_result' method in parallel.
  """

  def __init__(self, task_output_dir, task_name, shard_count):
    """Initializes TaskOutputCollector, ensures |task_output_dir| exists.

    Args:
      task_output_dir: (optional) local directory to put fetched files to.
      task_name: name of the swarming task results belong to.
      shard_count: expected number of task shards.
    """
    self.task_output_dir = task_output_dir
    self.task_name = task_name
    self.shard_count = shard_count

    # Guards _per_shard_results and _storage; multiple retrieve_results
    # threads report into this object concurrently.
    self._lock = threading.Lock()
    # Maps shard index -> result dict for shards seen so far.
    self._per_shard_results = {}
    # Lazily created isolateserver.Storage, shared by all shards.
    self._storage = None

    if self.task_output_dir and not os.path.isdir(self.task_output_dir):
      os.makedirs(self.task_output_dir)

  def process_shard_result(self, shard_index, result):
    """Stores results of a single task shard, fetches output files if necessary.

    Modifies |result| in place: adds an 'isolated_out' key extracted from the
    shard's output logs (or None).

    Called concurrently from multiple threads.
    """
    # Sanity check index is in expected range.
    assert isinstance(shard_index, int)
    if shard_index < 0 or shard_index >= self.shard_count:
      logging.warning(
          'Shard index %d is outside of expected range: [0; %d]',
          shard_index, self.shard_count - 1)
      return

    # Pick out the single run_isolated_out_hack blob, if any, from the outputs.
    assert not 'isolated_out' in result
    result['isolated_out'] = None
    for output in result['outputs']:
      isolated_files_location = extract_output_files_location(output)
      if isolated_files_location:
        if result['isolated_out']:
          raise ValueError('Unexpected two task with output')
        result['isolated_out'] = isolated_files_location

    # Store result dict of that shard, ignore results we've already seen.
    with self._lock:
      if shard_index in self._per_shard_results:
        logging.warning('Ignoring duplicate shard index %d', shard_index)
        return
      self._per_shard_results[shard_index] = result

    # Fetch output files if necessary. Done outside the lock: only the first
    # caller for a given shard index reaches this point.
    if self.task_output_dir and result['isolated_out']:
      storage = self._get_storage(
          result['isolated_out']['server'],
          result['isolated_out']['namespace'])
      if storage:
        # Output files are supposed to be small and they are not reused across
        # tasks. So use MemoryCache for them instead of on-disk cache. Make
        # files writable, so that calling script can delete them.
        isolateserver.fetch_isolated(
            result['isolated_out']['hash'],
            storage,
            isolateserver.MemoryCache(file_mode_mask=0700),
            os.path.join(self.task_output_dir, str(shard_index)),
            False)

  def finalize(self):
    """Assembles and returns task summary JSON, shutdowns underlying Storage."""
    with self._lock:
      # Write an array of shard results with None for missing shards.
      summary = {
        'shards': [
          self._per_shard_results.get(i) for i in xrange(self.shard_count)
        ],
      }
      # Write summary.json to task_output_dir as well.
      if self.task_output_dir:
        tools.write_json(
            os.path.join(self.task_output_dir, 'summary.json'),
            summary,
            False)
      if self._storage:
        self._storage.close()
        self._storage = None
      return summary

  def _get_storage(self, isolate_server, namespace):
    """Returns isolateserver.Storage to use to fetch files."""
    # Only needed when fetching files to disk.
    assert self.task_output_dir
    with self._lock:
      if not self._storage:
        self._storage = isolateserver.get_storage(isolate_server, namespace)
      else:
        # Shards must all use exact same isolate server and namespace.
        if self._storage.location != isolate_server:
          logging.error(
              'Task shards are using multiple isolate servers: %s and %s',
              self._storage.location, isolate_server)
          return None
        if self._storage.namespace != namespace:
          logging.error(
              'Task shards are using multiple namespaces: %s and %s',
              self._storage.namespace, namespace)
          return None
      return self._storage
| 534 | |
| 535 | |
def extract_output_files_location(task_log):
  """Task log -> location of task output files to fetch.

  TODO(vadimsh,maruel): Use side-channel to get this information.
  See 'run_tha_test' in run_isolated.py for where the data is generated.

  Returns:
    Dict with 'hash', 'namespace', 'server' and 'view_url' keys on success.
    None if information is missing or can not be parsed.
  """
  if not task_log:
    return None
  match = re.search(
      r'\[run_isolated_out_hack\](.*)\[/run_isolated_out_hack\]',
      task_log,
      re.DOTALL)
  if match is None:
    return None

  def to_ascii(val):
    # The blob comes from task output; only accept plain strings.
    if not isinstance(val, basestring):
      raise ValueError()
    return val.encode('ascii')

  blob = match.group(1)
  try:
    parsed = json.loads(blob)
    if not isinstance(parsed, dict):
      raise ValueError()
    server = to_ascii(parsed['storage'])
    if not file_path.is_url(server):
      raise ValueError()
    namespace = to_ascii(parsed['namespace'])
    isolated_hash = to_ascii(parsed['hash'])
    view_url = '%s/browse?%s' % (server, urllib.urlencode(
        [('namespace', namespace), ('hash', isolated_hash)]))
    return {
      'hash': isolated_hash,
      'namespace': namespace,
      'server': server,
      'view_url': view_url,
    }
  except (KeyError, ValueError):
    logging.warning(
        'Unexpected value of run_isolated_out_hack: %s', blob)
    return None
| 581 | |
| 582 | |
def now():
  """Returns the current time; exists so it can be mocked easily in tests."""
  return time.time()
| 586 | |
| 587 | |
def retrieve_results(
    base_url, shard_index, task_id, timeout, should_stop, output_collector):
  """Retrieves results for a single task ID.

  Polls the server until the task reaches a not-running state, the timeout
  expires or |should_stop| is set.

  Args:
    base_url: URL of the Swarming server.
    shard_index: index of this shard, forwarded to |output_collector|.
    task_id: id of the task to poll for.
    timeout: seconds to wait in total before giving up; falsy means no limit.
    should_stop: threading.Event checked to abort the wait early.
    output_collector: (optional) TaskOutputCollector-like object whose
        process_shard_result() is called with the final result.

  Returns:
    <result dict> on success.
    None on failure.
  """
  assert isinstance(timeout, float), timeout
  result_url = '%s/swarming/api/v1/client/task/%s' % (base_url, task_id)
  output_url = '%s/swarming/api/v1/client/task/%s/output/all' % (
      base_url, task_id)
  started = now()
  deadline = started + timeout if timeout else None
  attempt = 0

  while not should_stop.is_set():
    attempt += 1

    # Waiting for too long -> give up.
    current_time = now()
    if deadline and current_time >= deadline:
      logging.error('retrieve_results(%s) timed out on attempt %d',
          base_url, attempt)
      return None

    # Do not spin too fast. Spin faster at the beginning though.
    # Start with 1 sec delay and for each 30 sec of waiting add another second
    # of delay, until hitting 15 sec ceiling.
    if attempt > 1:
      max_delay = min(15, 1 + (current_time - started) / 30.0)
      # Never sleep past the deadline.
      delay = min(max_delay, deadline - current_time) if deadline else max_delay
      if delay > 0:
        logging.debug('Waiting %.1f sec before retrying', delay)
        should_stop.wait(delay)
        if should_stop.is_set():
          return None

    # Disable internal retries in net.url_read_json, since we are doing retries
    # ourselves.
    # TODO(maruel): We'd need to know if it's a 404 and not retry at all.
    result = net.url_read_json(result_url, retry_50x=False)
    if not result:
      continue
    if result['state'] in State.STATES_NOT_RUNNING:
      # Task is done; fetch its output before returning.
      out = net.url_read_json(output_url)
      result['outputs'] = (out or {}).get('outputs', [])
      if not result['outputs']:
        logging.error('No output found for task %s', task_id)
      # Record the result, try to fetch attached output files (if any).
      if output_collector:
        # TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
        output_collector.process_shard_result(shard_index, result)
      return result
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 642 | |
| 643 | |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 644 | def yield_results( |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 645 | swarm_base_url, task_ids, timeout, max_threads, print_status_updates, |
| 646 | output_collector): |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 647 | """Yields swarming task results from the swarming server as (index, result). |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 648 | |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 649 | Duplicate shards are ignored. Shards are yielded in order of completion. |
| 650 | Timed out shards are NOT yielded at all. Caller can compare number of yielded |
| 651 | shards with len(task_keys) to verify all shards completed. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 652 | |
| 653 | max_threads is optional and is used to limit the number of parallel fetches |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 654 | done. Since in general the number of task_keys is in the range <=10, it's not |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 655 | worth normally to limit the number threads. Mostly used for testing purposes. |
Marc-Antoine Ruel | 5c72034 | 2014-02-21 14:46:14 -0500 | [diff] [blame] | 656 | |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 657 | output_collector is an optional instance of TaskOutputCollector that will be |
| 658 | used to fetch files produced by a task from isolate server to the local disk. |
| 659 | |
Marc-Antoine Ruel | 5c72034 | 2014-02-21 14:46:14 -0500 | [diff] [blame] | 660 | Yields: |
| 661 | (index, result). In particular, 'result' is defined as the |
| 662 | GetRunnerResults() function in services/swarming/server/test_runner.py. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 663 | """ |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 664 | number_threads = ( |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 665 | min(max_threads, len(task_ids)) if max_threads else len(task_ids)) |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 666 | should_stop = threading.Event() |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 667 | results_channel = threading_utils.TaskChannel() |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 668 | |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 669 | with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool: |
| 670 | try: |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 671 | # Adds a task to the thread pool to call 'retrieve_results' and return |
| 672 | # the results together with shard_index that produced them (as a tuple). |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 673 | def enqueue_retrieve_results(shard_index, task_id): |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 674 | task_fn = lambda *args: (shard_index, retrieve_results(*args)) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 675 | pool.add_task( |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 676 | 0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index, |
| 677 | task_id, timeout, should_stop, output_collector) |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 678 | |
| 679 | # Enqueue 'retrieve_results' calls for each shard key to run in parallel. |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 680 | for shard_index, task_id in enumerate(task_ids): |
| 681 | enqueue_retrieve_results(shard_index, task_id) |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 682 | |
| 683 | # Wait for all of them to finish. |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 684 | shards_remaining = range(len(task_ids)) |
| 685 | active_task_count = len(task_ids) |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 686 | while active_task_count: |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 687 | shard_index, result = None, None |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 688 | try: |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 689 | shard_index, result = results_channel.pull( |
| 690 | timeout=STATUS_UPDATE_INTERVAL) |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 691 | except threading_utils.TaskChannel.Timeout: |
| 692 | if print_status_updates: |
| 693 | print( |
| 694 | 'Waiting for results from the following shards: %s' % |
| 695 | ', '.join(map(str, shards_remaining))) |
| 696 | sys.stdout.flush() |
| 697 | continue |
| 698 | except Exception: |
| 699 | logging.exception('Unexpected exception in retrieve_results') |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 700 | |
| 701 | # A call to 'retrieve_results' finished (successfully or not). |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 702 | active_task_count -= 1 |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 703 | if not result: |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 704 | logging.error('Failed to retrieve the results for a swarming key') |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 705 | continue |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 706 | |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 707 | # Yield back results to the caller. |
| 708 | assert shard_index in shards_remaining |
| 709 | shards_remaining.remove(shard_index) |
| 710 | yield shard_index, result |
Vadim Shtayura | b19319e | 2014-04-27 08:50:06 -0700 | [diff] [blame] | 711 | |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 712 | finally: |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 713 | # Done or aborted with Ctrl+C, kill the remaining threads. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 714 | should_stop.set() |
| 715 | |
| 716 | |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 717 | def decorate_shard_output(swarming, shard_index, metadata): |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 718 | """Returns wrapped output for swarming task shard.""" |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 719 | def t(d): |
| 720 | return datetime.datetime.strptime(d, '%Y-%m-%d %H:%M:%S') |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 721 | if metadata.get('started_ts'): |
| 722 | pending = '%.1fs' % ( |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 723 | t(metadata['started_ts']) - t(metadata['created_ts'])).total_seconds() |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 724 | else: |
| 725 | pending = 'N/A' |
| 726 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 727 | if metadata.get('durations'): |
| 728 | duration = '%.1fs' % metadata['durations'][0] |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 729 | else: |
| 730 | duration = 'N/A' |
| 731 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 732 | if metadata.get('exit_codes'): |
| 733 | exit_code = '%d' % metadata['exit_codes'][0] |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 734 | else: |
| 735 | exit_code = 'N/A' |
| 736 | |
| 737 | bot_id = metadata.get('bot_id') or 'N/A' |
| 738 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 739 | url = '%s/user/task/%s' % (swarming, metadata['id']) |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 740 | tag_header = 'Shard %d %s' % (shard_index, url) |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 741 | tag_footer = ( |
| 742 | 'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % ( |
| 743 | shard_index, pending, duration, bot_id, exit_code)) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 744 | |
| 745 | tag_len = max(len(tag_header), len(tag_footer)) |
| 746 | dash_pad = '+-%s-+\n' % ('-' * tag_len) |
| 747 | tag_header = '| %s |\n' % tag_header.ljust(tag_len) |
| 748 | tag_footer = '| %s |\n' % tag_footer.ljust(tag_len) |
| 749 | |
| 750 | header = dash_pad + tag_header + dash_pad |
| 751 | footer = dash_pad + tag_footer + dash_pad[:-1] |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 752 | output = '\n'.join(o for o in metadata['outputs'] if o).rstrip() + '\n' |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 753 | return header + output + footer |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 754 | |
| 755 | |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 756 | def collect( |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 757 | swarming, task_name, task_ids, timeout, decorate, print_status_updates, |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 758 | task_summary_json, task_output_dir): |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 759 | """Retrieves results of a Swarming task.""" |
Vadim Shtayura | c8437bf | 2014-07-09 19:45:36 -0700 | [diff] [blame] | 760 | # Collect summary JSON and output files (if task_output_dir is not None). |
| 761 | output_collector = TaskOutputCollector( |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 762 | task_output_dir, task_name, len(task_ids)) |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 763 | |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 764 | seen_shards = set() |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 765 | exit_code = 0 |
Marc-Antoine Ruel | d59e807 | 2014-10-21 18:54:45 -0400 | [diff] [blame] | 766 | total_duration = 0 |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 767 | try: |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 768 | for index, metadata in yield_results( |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 769 | swarming, task_ids, timeout, None, print_status_updates, |
| 770 | output_collector): |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 771 | seen_shards.add(index) |
Vadim Shtayura | 473455a | 2014-05-14 15:22:35 -0700 | [diff] [blame] | 772 | |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 773 | # Default to failure if there was no process that even started. |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 774 | shard_exit_code = 1 |
| 775 | if metadata.get('exit_codes'): |
| 776 | shard_exit_code = metadata['exit_codes'][0] |
maruel | 8db72b7 | 2015-09-02 13:28:11 -0700 | [diff] [blame] | 777 | if shard_exit_code: |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 778 | exit_code = shard_exit_code |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 779 | if metadata.get('durations'): |
| 780 | total_duration += metadata['durations'][0] |
Vadim Shtayura | 473455a | 2014-05-14 15:22:35 -0700 | [diff] [blame] | 781 | |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 782 | if decorate: |
Marc-Antoine Ruel | 5e6ccdb | 2015-04-02 15:55:13 -0400 | [diff] [blame] | 783 | print(decorate_shard_output(swarming, index, metadata)) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 784 | if len(seen_shards) < len(task_ids): |
| 785 | print('') |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 786 | else: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 787 | if metadata.get('exit_codes'): |
| 788 | exit_code = metadata['exit_codes'][0] |
| 789 | else: |
| 790 | exit_code = 'N/A' |
| 791 | print('%s: %s %s' % |
| 792 | (metadata.get('bot_id') or 'N/A', metadata['id'], exit_code)) |
| 793 | for output in metadata['outputs']: |
| 794 | if not output: |
| 795 | continue |
| 796 | output = output.rstrip() |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 797 | if output: |
| 798 | print(''.join(' %s\n' % l for l in output.splitlines())) |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 799 | finally: |
Vadim Shtayura | c8437bf | 2014-07-09 19:45:36 -0700 | [diff] [blame] | 800 | summary = output_collector.finalize() |
| 801 | if task_summary_json: |
| 802 | tools.write_json(task_summary_json, summary, False) |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 803 | |
Marc-Antoine Ruel | d59e807 | 2014-10-21 18:54:45 -0400 | [diff] [blame] | 804 | if decorate and total_duration: |
| 805 | print('Total duration: %.1fs' % total_duration) |
| 806 | |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 807 | if len(seen_shards) != len(task_ids): |
| 808 | missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards] |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 809 | print >> sys.stderr, ('Results from some shards are missing: %s' % |
| 810 | ', '.join(map(str, missing_shards))) |
Vadim Shtayura | c524f51 | 2014-05-15 09:54:56 -0700 | [diff] [blame] | 811 | return 1 |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 812 | |
Marc-Antoine Ruel | 4e6b73d | 2014-10-03 18:00:05 -0400 | [diff] [blame] | 813 | return exit_code |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 814 | |
| 815 | |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 816 | ### Commands. |
| 817 | |
| 818 | |
def abort_task(_swarming, _manifest):
  """Given a task manifest that was triggered, aborts its execution."""
  # TODO(vadimsh): Not supported by the server yet; intentionally a no-op.
| 822 | |
| 823 | |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 824 | def add_filter_options(parser): |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 825 | parser.filter_group = optparse.OptionGroup(parser, 'Filtering slaves') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 826 | parser.filter_group.add_option( |
Marc-Antoine Ruel | b39e8cf | 2014-01-20 10:39:31 -0500 | [diff] [blame] | 827 | '-d', '--dimension', default=[], action='append', nargs=2, |
Marc-Antoine Ruel | 92f3242 | 2013-11-06 18:12:13 -0500 | [diff] [blame] | 828 | dest='dimensions', metavar='FOO bar', |
| 829 | help='dimension to filter on') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 830 | parser.add_option_group(parser.filter_group) |
| 831 | |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 832 | |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 833 | def add_sharding_options(parser): |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 834 | parser.sharding_group = optparse.OptionGroup(parser, 'Sharding options') |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 835 | parser.sharding_group.add_option( |
| 836 | '--shards', type='int', default=1, |
| 837 | help='Number of shards to trigger and collect.') |
| 838 | parser.add_option_group(parser.sharding_group) |
| 839 | |
| 840 | |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 841 | def add_trigger_options(parser): |
| 842 | """Adds all options to trigger a task on Swarming.""" |
Marc-Antoine Ruel | f7d737d | 2014-12-10 15:36:29 -0500 | [diff] [blame] | 843 | isolateserver.add_isolate_server_options(parser) |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 844 | add_filter_options(parser) |
| 845 | |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 846 | parser.task_group = optparse.OptionGroup(parser, 'Task properties') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 847 | parser.task_group.add_option( |
Marc-Antoine Ruel | 185ded4 | 2015-01-28 20:49:18 -0500 | [diff] [blame] | 848 | '-s', '--isolated', |
| 849 | help='Hash of the .isolated to grab from the isolate server') |
| 850 | parser.task_group.add_option( |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 851 | '-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar', |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 852 | help='Environment variables to set') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 853 | parser.task_group.add_option( |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 854 | '--priority', type='int', default=100, |
| 855 | help='The lower value, the more important the task is') |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 856 | parser.task_group.add_option( |
Marc-Antoine Ruel | 5b47578 | 2014-02-14 20:57:59 -0500 | [diff] [blame] | 857 | '-T', '--task-name', |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 858 | help='Display name of the task. Defaults to ' |
| 859 | '<base_name>/<dimensions>/<isolated hash>/<timestamp> if an ' |
| 860 | 'isolated file is provided, if a hash is provided, it defaults to ' |
| 861 | '<user>/<dimensions>/<isolated hash>/<timestamp>') |
Marc-Antoine Ruel | 13b7b78 | 2014-03-14 11:14:57 -0400 | [diff] [blame] | 862 | parser.task_group.add_option( |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 863 | '--tags', action='append', default=[], |
| 864 | help='Tags to assign to the task.') |
| 865 | parser.task_group.add_option( |
Marc-Antoine Ruel | 686a287 | 2014-12-05 10:06:29 -0500 | [diff] [blame] | 866 | '--user', default='', |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 867 | help='User associated with the task. Defaults to authenticated user on ' |
| 868 | 'the server.') |
| 869 | parser.task_group.add_option( |
Marc-Antoine Ruel | 0219639 | 2014-10-17 16:29:43 -0400 | [diff] [blame] | 870 | '--idempotent', action='store_true', default=False, |
| 871 | help='When set, the server will actively try to find a previous task ' |
| 872 | 'with the same parameter and return this result instead if possible') |
| 873 | parser.task_group.add_option( |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 874 | '--expiration', type='int', default=6*60*60, |
Marc-Antoine Ruel | 13b7b78 | 2014-03-14 11:14:57 -0400 | [diff] [blame] | 875 | help='Seconds to allow the task to be pending for a bot to run before ' |
| 876 | 'this task request expires.') |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 877 | parser.task_group.add_option( |
Marc-Antoine Ruel | 7714281 | 2014-10-03 11:19:43 -0400 | [diff] [blame] | 878 | '--deadline', type='int', dest='expiration', |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 879 | help=optparse.SUPPRESS_HELP) |
Marc-Antoine Ruel | 7714281 | 2014-10-03 11:19:43 -0400 | [diff] [blame] | 880 | parser.task_group.add_option( |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 881 | '--hard-timeout', type='int', default=60*60, |
| 882 | help='Seconds to allow the task to complete.') |
| 883 | parser.task_group.add_option( |
| 884 | '--io-timeout', type='int', default=20*60, |
| 885 | help='Seconds to allow the task to be silent.') |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 886 | parser.task_group.add_option( |
| 887 | '--raw-cmd', action='store_true', default=False, |
| 888 | help='When set, the command after -- is used as-is without run_isolated. ' |
| 889 | 'In this case, no .isolated file is expected.') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 890 | parser.add_option_group(parser.task_group) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 891 | |
| 892 | |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 893 | def process_trigger_options(parser, options, args): |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 894 | """Processes trigger options and uploads files to isolate server if necessary. |
| 895 | """ |
| 896 | options.dimensions = dict(options.dimensions) |
| 897 | options.env = dict(options.env) |
| 898 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 899 | data = [] |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 900 | if not options.dimensions: |
| 901 | parser.error('Please at least specify one --dimension') |
| 902 | if options.raw_cmd: |
| 903 | if not args: |
| 904 | parser.error( |
| 905 | 'Arguments with --raw-cmd should be passed after -- as command ' |
| 906 | 'delimiter.') |
| 907 | if options.isolate_server: |
| 908 | parser.error('Can\'t use both --raw-cmd and --isolate-server.') |
| 909 | |
| 910 | command = args |
| 911 | if not options.task_name: |
Marc-Antoine Ruel | 185ded4 | 2015-01-28 20:49:18 -0500 | [diff] [blame] | 912 | options.task_name = u'%s/%s' % ( |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 913 | options.user, |
| 914 | '_'.join( |
| 915 | '%s=%s' % (k, v) |
| 916 | for k, v in sorted(options.dimensions.iteritems()))) |
| 917 | else: |
| 918 | isolateserver.process_isolate_server_options(parser, options, False) |
| 919 | try: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 920 | command, data = isolated_handle_options(options, args) |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 921 | except ValueError as e: |
| 922 | parser.error(str(e)) |
| 923 | |
| 924 | return TaskRequest( |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 925 | command=command, |
| 926 | data=data, |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 927 | dimensions=options.dimensions, |
| 928 | env=options.env, |
| 929 | expiration=options.expiration, |
| 930 | hard_timeout=options.hard_timeout, |
| 931 | idempotent=options.idempotent, |
| 932 | io_timeout=options.io_timeout, |
| 933 | name=options.task_name, |
| 934 | priority=options.priority, |
| 935 | tags=options.tags, |
| 936 | user=options.user, |
| 937 | verbose=options.verbose) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 938 | |
| 939 | |
def add_collect_options(parser):
  """Registers the options used by the 'collect' flow on |parser|.

  Assumes |parser| already has server_group and group_logging option groups
  (presumably set up by the project's common parser; defined elsewhere).
  Also adds a 'Task output' group kept as parser.task_output_group.
  """
  parser.server_group.add_option(
      '-t', '--timeout',
      type='float',
      default=80*60.,
      help='Timeout to wait for result, set to 0 for no timeout; default: '
           '%default s')
  parser.group_logging.add_option(
      '--decorate', action='store_true', help='Decorate output')
  parser.group_logging.add_option(
      '--print-status-updates', action='store_true',
      help='Print periodic status updates')
  parser.task_output_group = optparse.OptionGroup(parser, 'Task output')
  parser.task_output_group.add_option(
      '--task-summary-json',
      metavar='FILE',
      help='Dump a summary of task results to this file as json. It contains '
           'only shards statuses as know to server directly. Any output files '
           'emitted by the task can be collected by using --task-output-dir')
  parser.task_output_group.add_option(
      '--task-output-dir',
      metavar='DIR',
      help='Directory to put task results into. When the task finishes, this '
           'directory contains per-shard directory with output files produced '
           'by shards: <task-output-dir>/<zero-based-shard-index>/.')
  parser.add_option_group(parser.task_output_group)
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 966 | |
| 967 | |
Marc-Antoine Ruel | 13e7c88 | 2015-03-26 18:19:10 -0400 | [diff] [blame] | 968 | @subcommand.usage('bots...') |
| 969 | def CMDbot_delete(parser, args): |
| 970 | """Forcibly deletes bots from the Swarming server.""" |
| 971 | parser.add_option( |
| 972 | '-f', '--force', action='store_true', |
| 973 | help='Do not prompt for confirmation') |
| 974 | options, args = parser.parse_args(args) |
| 975 | if not args: |
| 976 | parser.error('Please specific bots to delete') |
| 977 | |
| 978 | bots = sorted(args) |
| 979 | if not options.force: |
| 980 | print('Delete the following bots?') |
| 981 | for bot in bots: |
| 982 | print(' %s' % bot) |
| 983 | if raw_input('Continue? [y/N] ') not in ('y', 'Y'): |
| 984 | print('Goodbye.') |
| 985 | return 1 |
| 986 | |
| 987 | result = 0 |
| 988 | for bot in bots: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 989 | url = '%s/swarming/api/v1/client/bot/%s' % (options.swarming, bot) |
Marc-Antoine Ruel | 13e7c88 | 2015-03-26 18:19:10 -0400 | [diff] [blame] | 990 | if net.url_read_json(url, method='DELETE') is None: |
| 991 | print('Deleting %s failed' % bot) |
| 992 | result = 1 |
| 993 | return result |
| 994 | |
| 995 | |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 996 | def CMDbots(parser, args): |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 997 | """Returns information about the bots connected to the Swarming server.""" |
| 998 | add_filter_options(parser) |
| 999 | parser.filter_group.add_option( |
Marc-Antoine Ruel | 2808311 | 2014-03-13 16:34:04 -0400 | [diff] [blame] | 1000 | '--dead-only', action='store_true', |
| 1001 | help='Only print dead bots, useful to reap them and reimage broken bots') |
| 1002 | parser.filter_group.add_option( |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1003 | '-k', '--keep-dead', action='store_true', |
| 1004 | help='Do not filter out dead bots') |
| 1005 | parser.filter_group.add_option( |
| 1006 | '-b', '--bare', action='store_true', |
Marc-Antoine Ruel | e7b0016 | 2014-03-12 16:59:01 -0400 | [diff] [blame] | 1007 | help='Do not print out dimensions') |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1008 | options, args = parser.parse_args(args) |
Marc-Antoine Ruel | 2808311 | 2014-03-13 16:34:04 -0400 | [diff] [blame] | 1009 | |
| 1010 | if options.keep_dead and options.dead_only: |
| 1011 | parser.error('Use only one of --keep-dead and --dead-only') |
Vadim Shtayura | 6b555c1 | 2014-07-23 16:22:18 -0700 | [diff] [blame] | 1012 | |
Marc-Antoine Ruel | c6c579e | 2014-09-08 18:43:45 -0400 | [diff] [blame] | 1013 | bots = [] |
| 1014 | cursor = None |
| 1015 | limit = 250 |
| 1016 | # Iterate via cursors. |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1017 | base_url = options.swarming + '/swarming/api/v1/client/bots?limit=%d' % limit |
Marc-Antoine Ruel | c6c579e | 2014-09-08 18:43:45 -0400 | [diff] [blame] | 1018 | while True: |
| 1019 | url = base_url |
| 1020 | if cursor: |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1021 | url += '&cursor=%s' % urllib.quote(cursor) |
Marc-Antoine Ruel | c6c579e | 2014-09-08 18:43:45 -0400 | [diff] [blame] | 1022 | data = net.url_read_json(url) |
| 1023 | if data is None: |
| 1024 | print >> sys.stderr, 'Failed to access %s' % options.swarming |
| 1025 | return 1 |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1026 | bots.extend(data['items']) |
Marc-Antoine Ruel | c6c579e | 2014-09-08 18:43:45 -0400 | [diff] [blame] | 1027 | cursor = data['cursor'] |
| 1028 | if not cursor: |
| 1029 | break |
| 1030 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1031 | for bot in natsort.natsorted(bots, key=lambda x: x['id']): |
Marc-Antoine Ruel | 2808311 | 2014-03-13 16:34:04 -0400 | [diff] [blame] | 1032 | if options.dead_only: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1033 | if not bot['is_dead']: |
Marc-Antoine Ruel | 2808311 | 2014-03-13 16:34:04 -0400 | [diff] [blame] | 1034 | continue |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1035 | elif not options.keep_dead and bot['is_dead']: |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1036 | continue |
| 1037 | |
Marc-Antoine Ruel | e7b0016 | 2014-03-12 16:59:01 -0400 | [diff] [blame] | 1038 | # If the user requested to filter on dimensions, ensure the bot has all the |
| 1039 | # dimensions requested. |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1040 | dimensions = bot['dimensions'] |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1041 | for key, value in options.dimensions: |
| 1042 | if key not in dimensions: |
| 1043 | break |
Marc-Antoine Ruel | e7b0016 | 2014-03-12 16:59:01 -0400 | [diff] [blame] | 1044 | # A bot can have multiple value for a key, for example, |
| 1045 | # {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will |
| 1046 | # be accepted. |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1047 | if isinstance(dimensions[key], list): |
| 1048 | if value not in dimensions[key]: |
| 1049 | break |
| 1050 | else: |
| 1051 | if value != dimensions[key]: |
| 1052 | break |
| 1053 | else: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1054 | print bot['id'] |
Marc-Antoine Ruel | e7b0016 | 2014-03-12 16:59:01 -0400 | [diff] [blame] | 1055 | if not options.bare: |
Marc-Antoine Ruel | 0a62061 | 2014-08-13 15:47:07 -0400 | [diff] [blame] | 1056 | print ' %s' % json.dumps(dimensions, sort_keys=True) |
Marc-Antoine Ruel | fd49117 | 2014-11-19 19:26:13 -0500 | [diff] [blame] | 1057 | if bot.get('task_id'): |
| 1058 | print ' task: %s' % bot['task_id'] |
Marc-Antoine Ruel | 819fb16 | 2014-03-12 16:38:26 -0400 | [diff] [blame] | 1059 | return 0 |
| 1060 | |
| 1061 | |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1062 | @subcommand.usage('--json file | task_id...') |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1063 | def CMDcollect(parser, args): |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1064 | """Retrieves results of one or multiple Swarming task by its ID. |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1065 | |
| 1066 | The result can be in multiple part if the execution was sharded. It can |
| 1067 | potentially have retries. |
| 1068 | """ |
| 1069 | add_collect_options(parser) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1070 | parser.add_option( |
| 1071 | '-j', '--json', |
| 1072 | help='Load the task ids from .json as saved by trigger --dump-json') |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1073 | (options, args) = parser.parse_args(args) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1074 | if not args and not options.json: |
| 1075 | parser.error('Must specify at least one task id or --json.') |
| 1076 | if args and options.json: |
| 1077 | parser.error('Only use one of task id or --json.') |
| 1078 | |
| 1079 | if options.json: |
Marc-Antoine Ruel | 9025a78 | 2015-03-17 16:42:59 -0400 | [diff] [blame] | 1080 | try: |
| 1081 | with open(options.json) as f: |
| 1082 | tasks = sorted( |
| 1083 | json.load(f)['tasks'].itervalues(), key=lambda x: x['shard_index']) |
| 1084 | args = [t['task_id'] for t in tasks] |
Marc-Antoine Ruel | 5d055ed | 2015-04-22 14:59:56 -0400 | [diff] [blame] | 1085 | except (KeyError, IOError, TypeError, ValueError): |
Marc-Antoine Ruel | 9025a78 | 2015-03-17 16:42:59 -0400 | [diff] [blame] | 1086 | parser.error('Failed to parse %s' % options.json) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1087 | else: |
| 1088 | valid = frozenset('0123456789abcdef') |
| 1089 | if any(not valid.issuperset(task_id) for task_id in args): |
| 1090 | parser.error('Task ids are 0-9a-f.') |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1091 | |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1092 | try: |
| 1093 | return collect( |
| 1094 | options.swarming, |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1095 | None, |
| 1096 | args, |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1097 | options.timeout, |
| 1098 | options.decorate, |
| 1099 | options.print_status_updates, |
| 1100 | options.task_summary_json, |
| 1101 | options.task_output_dir) |
| 1102 | except Failure: |
| 1103 | on_error.report(None) |
| 1104 | return 1 |
| 1105 | |
| 1106 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1107 | @subcommand.usage('[resource name]') |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1108 | def CMDquery(parser, args): |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1109 | """Returns raw JSON information via an URL endpoint. Use 'list' to gather the |
| 1110 | list of valid values from the server. |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1111 | |
| 1112 | Examples: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1113 | Printing the list of known URLs: |
| 1114 | swarming.py query -S https://server-url list |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1115 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1116 | Listing last 50 tasks on a specific bot named 'swarm1' |
| 1117 | swarming.py query -S https://server-url --limit 50 bot/swarm1/tasks |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1118 | """ |
| 1119 | CHUNK_SIZE = 250 |
| 1120 | |
| 1121 | parser.add_option( |
| 1122 | '-L', '--limit', type='int', default=200, |
| 1123 | help='Limit to enforce on limitless items (like number of tasks); ' |
| 1124 | 'default=%default') |
Paweł Hajdan, Jr | 53ef013 | 2015-03-20 17:49:18 +0100 | [diff] [blame] | 1125 | parser.add_option( |
| 1126 | '--json', help='Path to JSON output file (otherwise prints to stdout)') |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1127 | (options, args) = parser.parse_args(args) |
maruel | d8aba22 | 2015-09-03 12:21:19 -0700 | [diff] [blame] | 1128 | if len(args) != 1: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1129 | parser.error('Must specify only one resource name.') |
| 1130 | |
| 1131 | base_url = options.swarming + '/swarming/api/v1/client/' + args[0] |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1132 | url = base_url |
| 1133 | if options.limit: |
Marc-Antoine Ruel | ea74f29 | 2014-10-24 20:55:39 -0400 | [diff] [blame] | 1134 | # Check check, change if not working out. |
| 1135 | merge_char = '&' if '?' in url else '?' |
| 1136 | url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit)) |
maruel | d8aba22 | 2015-09-03 12:21:19 -0700 | [diff] [blame] | 1137 | data = net.url_read_json(url) |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1138 | if data is None: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1139 | print >> sys.stderr, 'Failed to access %s' % options.swarming |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1140 | return 1 |
| 1141 | |
| 1142 | # Some items support cursors. Try to get automatically if cursors are needed |
| 1143 | # by looking at the 'cursor' items. |
| 1144 | while ( |
| 1145 | data.get('cursor') and |
| 1146 | (not options.limit or len(data['items']) < options.limit)): |
Marc-Antoine Ruel | 0696e40 | 2015-03-23 15:28:44 -0400 | [diff] [blame] | 1147 | merge_char = '&' if '?' in base_url else '?' |
| 1148 | url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor'])) |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1149 | if options.limit: |
| 1150 | url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items'])) |
| 1151 | new = net.url_read_json(url) |
| 1152 | if new is None: |
| 1153 | print >> sys.stderr, 'Failed to access %s' % options.swarming |
| 1154 | return 1 |
| 1155 | data['items'].extend(new['items']) |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1156 | data['cursor'] = new['cursor'] |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1157 | |
| 1158 | if options.limit and len(data.get('items', [])) > options.limit: |
| 1159 | data['items'] = data['items'][:options.limit] |
| 1160 | data.pop('cursor', None) |
| 1161 | |
Paweł Hajdan, Jr | 53ef013 | 2015-03-20 17:49:18 +0100 | [diff] [blame] | 1162 | if options.json: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1163 | with open(options.json, 'w') as f: |
| 1164 | json.dump(data, f) |
Paweł Hajdan, Jr | 53ef013 | 2015-03-20 17:49:18 +0100 | [diff] [blame] | 1165 | else: |
Marc-Antoine Ruel | cda90ee | 2015-03-23 15:13:20 -0400 | [diff] [blame] | 1166 | try: |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1167 | json.dump(data, sys.stdout, indent=2, sort_keys=True) |
Marc-Antoine Ruel | cda90ee | 2015-03-23 15:13:20 -0400 | [diff] [blame] | 1168 | sys.stdout.write('\n') |
| 1169 | except IOError: |
| 1170 | pass |
Marc-Antoine Ruel | 79940ae | 2014-09-23 17:55:41 -0400 | [diff] [blame] | 1171 | return 0 |
| 1172 | |
| 1173 | |
Vadim Shtayura | ae8085b | 2014-05-02 17:13:10 -0700 | [diff] [blame] | 1174 | @subcommand.usage('(hash|isolated) [-- extra_args]') |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1175 | def CMDrun(parser, args): |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1176 | """Triggers a task and wait for the results. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1177 | |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1178 | Basically, does everything to run a command remotely. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1179 | """ |
| 1180 | add_trigger_options(parser) |
| 1181 | add_collect_options(parser) |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 1182 | add_sharding_options(parser) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1183 | options, args = parser.parse_args(args) |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1184 | task_request = process_trigger_options(parser, options, args) |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1185 | try: |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1186 | tasks = trigger_task_shards( |
| 1187 | options.swarming, task_request, options.shards) |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1188 | except Failure as e: |
Marc-Antoine Ruel | cfb6085 | 2014-07-02 15:22:00 -0400 | [diff] [blame] | 1189 | on_error.report( |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1190 | 'Failed to trigger %s(%s): %s' % |
| 1191 | (options.task_name, args[0], e.args[0])) |
| 1192 | return 1 |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1193 | if not tasks: |
Marc-Antoine Ruel | cfb6085 | 2014-07-02 15:22:00 -0400 | [diff] [blame] | 1194 | on_error.report('Failed to trigger the task.') |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1195 | return 1 |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1196 | print('Triggered task: %s' % options.task_name) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1197 | task_ids = [ |
| 1198 | t['task_id'] |
| 1199 | for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index']) |
| 1200 | ] |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1201 | try: |
| 1202 | return collect( |
| 1203 | options.swarming, |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1204 | options.task_name, |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1205 | task_ids, |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1206 | options.timeout, |
Vadim Shtayura | 86a2cef | 2014-04-18 11:13:39 -0700 | [diff] [blame] | 1207 | options.decorate, |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 1208 | options.print_status_updates, |
Vadim Shtayura | c8437bf | 2014-07-09 19:45:36 -0700 | [diff] [blame] | 1209 | options.task_summary_json, |
Vadim Shtayura | e3fbd10 | 2014-04-29 17:05:21 -0700 | [diff] [blame] | 1210 | options.task_output_dir) |
Marc-Antoine Ruel | cfb6085 | 2014-07-02 15:22:00 -0400 | [diff] [blame] | 1211 | except Failure: |
| 1212 | on_error.report(None) |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1213 | return 1 |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1214 | |
| 1215 | |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1216 | @subcommand.usage('task_id') |
| 1217 | def CMDreproduce(parser, args): |
| 1218 | """Runs a task locally that was triggered on the server. |
| 1219 | |
| 1220 | This running locally the same commands that have been run on the bot. The data |
| 1221 | downloaded will be in a subdirectory named 'work' of the current working |
| 1222 | directory. |
| 1223 | """ |
| 1224 | options, args = parser.parse_args(args) |
| 1225 | if len(args) != 1: |
| 1226 | parser.error('Must specify exactly one task id.') |
| 1227 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1228 | url = options.swarming + '/swarming/api/v1/client/task/%s/request' % args[0] |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1229 | request = net.url_read_json(url) |
| 1230 | if not request: |
| 1231 | print >> sys.stderr, 'Failed to retrieve request data for the task' |
| 1232 | return 1 |
| 1233 | |
| 1234 | if not os.path.isdir('work'): |
| 1235 | os.mkdir('work') |
| 1236 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1237 | swarming_host = urlparse.urlparse(options.swarming).netloc |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1238 | properties = request['properties'] |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1239 | for data_url, _ in properties['data']: |
| 1240 | assert data_url.startswith('https://'), data_url |
| 1241 | data_host = urlparse.urlparse(data_url).netloc |
| 1242 | if data_host != swarming_host: |
| 1243 | auth.ensure_logged_in('https://' + data_host) |
| 1244 | |
| 1245 | content = net.url_read(data_url) |
| 1246 | if content is None: |
| 1247 | print >> sys.stderr, 'Failed to download %s' % data_url |
| 1248 | return 1 |
| 1249 | with zipfile.ZipFile(StringIO.StringIO(content)) as zip_file: |
| 1250 | zip_file.extractall('work') |
| 1251 | |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1252 | env = None |
| 1253 | if properties['env']: |
| 1254 | env = os.environ.copy() |
Marc-Antoine Ruel | 119b084 | 2014-12-19 15:27:58 -0500 | [diff] [blame] | 1255 | logging.info('env: %r', properties['env']) |
| 1256 | env.update( |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1257 | (k.encode('utf-8'), v.encode('utf-8')) |
| 1258 | for k, v in properties['env'].iteritems()) |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1259 | |
maruel | af6269b | 2015-09-10 14:37:51 -0700 | [diff] [blame^] | 1260 | exit_code = 0 |
| 1261 | for cmd in properties['commands']: |
| 1262 | try: |
| 1263 | c = subprocess.call(cmd, env=env, cwd='work') |
| 1264 | except OSError as e: |
| 1265 | print >> sys.stderr, 'Failed to run: %s' % ' '.join(cmd) |
| 1266 | print >> sys.stderr, str(e) |
| 1267 | c = 1 |
| 1268 | if not exit_code: |
| 1269 | exit_code = c |
| 1270 | return exit_code |
Marc-Antoine Ruel | 13a8127 | 2014-10-07 20:16:43 -0400 | [diff] [blame] | 1271 | |
| 1272 | |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1273 | @subcommand.usage("(hash|isolated) [-- extra_args|raw command]") |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1274 | def CMDtrigger(parser, args): |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1275 | """Triggers a Swarming task. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1276 | |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1277 | Accepts either the hash (sha1) of a .isolated file already uploaded or the |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 1278 | path to an .isolated file to archive. |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1279 | |
| 1280 | If an .isolated file is specified instead of an hash, it is first archived. |
Vadim Shtayura | ae8085b | 2014-05-02 17:13:10 -0700 | [diff] [blame] | 1281 | |
| 1282 | Passes all extra arguments provided after '--' as additional command line |
| 1283 | arguments for an isolated command specified in *.isolate file. |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1284 | """ |
| 1285 | add_trigger_options(parser) |
Vadim Shtayura | b450c60 | 2014-05-12 19:23:25 -0700 | [diff] [blame] | 1286 | add_sharding_options(parser) |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1287 | parser.add_option( |
| 1288 | '--dump-json', |
| 1289 | metavar='FILE', |
| 1290 | help='Dump details about the triggered task(s) to this file as json') |
Marc-Antoine Ruel | 7c54327 | 2013-11-26 13:26:15 -0500 | [diff] [blame] | 1291 | options, args = parser.parse_args(args) |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1292 | task_request = process_trigger_options(parser, options, args) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1293 | try: |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1294 | tasks = trigger_task_shards( |
| 1295 | options.swarming, task_request, options.shards) |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1296 | if tasks: |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1297 | print('Triggered task: %s' % options.task_name) |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 1298 | tasks_sorted = sorted( |
| 1299 | tasks.itervalues(), key=lambda x: x['shard_index']) |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1300 | if options.dump_json: |
| 1301 | data = { |
Marc-Antoine Ruel | efdc528 | 2014-12-12 19:31:00 -0500 | [diff] [blame] | 1302 | 'base_task_name': options.task_name, |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1303 | 'tasks': tasks, |
| 1304 | } |
| 1305 | tools.write_json(options.dump_json, data, True) |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1306 | print('To collect results, use:') |
| 1307 | print(' swarming.py collect -S %s --json %s' % |
| 1308 | (options.swarming, options.dump_json)) |
| 1309 | else: |
Marc-Antoine Ruel | 12a7da4 | 2014-10-01 08:29:47 -0400 | [diff] [blame] | 1310 | print('To collect results, use:') |
| 1311 | print(' swarming.py collect -S %s %s' % |
Marc-Antoine Ruel | 2f6581a | 2014-10-03 11:09:53 -0400 | [diff] [blame] | 1312 | (options.swarming, ' '.join(t['task_id'] for t in tasks_sorted))) |
| 1313 | print('Or visit:') |
| 1314 | for t in tasks_sorted: |
| 1315 | print(' ' + t['view_url']) |
Marc-Antoine Ruel | d6dbe76 | 2014-06-18 13:49:42 -0400 | [diff] [blame] | 1316 | return int(not tasks) |
Marc-Antoine Ruel | cfb6085 | 2014-07-02 15:22:00 -0400 | [diff] [blame] | 1317 | except Failure: |
| 1318 | on_error.report(None) |
vadimsh@chromium.org | d908a54 | 2013-10-30 01:36:17 +0000 | [diff] [blame] | 1319 | return 1 |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1320 | |
| 1321 | |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 1322 | class OptionParserSwarming(logging_utils.OptionParserWithLogging): |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1323 | def __init__(self, **kwargs): |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 1324 | logging_utils.OptionParserWithLogging.__init__( |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1325 | self, prog='swarming.py', **kwargs) |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 1326 | self.server_group = optparse.OptionGroup(self, 'Server') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 1327 | self.server_group.add_option( |
maruel@chromium.org | e9403ab | 2013-09-20 18:03:49 +0000 | [diff] [blame] | 1328 | '-S', '--swarming', |
Kevin Graney | 5346c16 | 2014-01-24 12:20:01 -0500 | [diff] [blame] | 1329 | metavar='URL', default=os.environ.get('SWARMING_SERVER', ''), |
maruel@chromium.org | e9403ab | 2013-09-20 18:03:49 +0000 | [diff] [blame] | 1330 | help='Swarming server to use') |
Marc-Antoine Ruel | 5471e3d | 2013-11-11 19:10:32 -0500 | [diff] [blame] | 1331 | self.add_option_group(self.server_group) |
Vadim Shtayura | e34e13a | 2014-02-02 11:23:26 -0800 | [diff] [blame] | 1332 | auth.add_auth_options(self) |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1333 | |
| 1334 | def parse_args(self, *args, **kwargs): |
Marc-Antoine Ruel | f74cffe | 2015-07-15 15:21:34 -0400 | [diff] [blame] | 1335 | options, args = logging_utils.OptionParserWithLogging.parse_args( |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1336 | self, *args, **kwargs) |
Marc-Antoine Ruel | 012067b | 2014-12-10 15:45:42 -0500 | [diff] [blame] | 1337 | auth.process_auth_options(self, options) |
| 1338 | user = self._process_swarming(options) |
| 1339 | if hasattr(options, 'user') and not options.user: |
| 1340 | options.user = user |
| 1341 | return options, args |
| 1342 | |
| 1343 | def _process_swarming(self, options): |
| 1344 | """Processes the --swarming option and aborts if not specified. |
| 1345 | |
| 1346 | Returns the identity as determined by the server. |
| 1347 | """ |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1348 | if not options.swarming: |
| 1349 | self.error('--swarming is required.') |
Marc-Antoine Ruel | 012067b | 2014-12-10 15:45:42 -0500 | [diff] [blame] | 1350 | try: |
| 1351 | options.swarming = net.fix_url(options.swarming) |
| 1352 | except ValueError as e: |
| 1353 | self.error('--swarming %s' % e) |
| 1354 | on_error.report_on_exception_exit(options.swarming) |
Marc-Antoine Ruel | f7d737d | 2014-12-10 15:36:29 -0500 | [diff] [blame] | 1355 | try: |
| 1356 | user = auth.ensure_logged_in(options.swarming) |
| 1357 | except ValueError as e: |
| 1358 | self.error(str(e)) |
Marc-Antoine Ruel | 012067b | 2014-12-10 15:45:42 -0500 | [diff] [blame] | 1359 | return user |
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1360 | |
| 1361 | |
def main(args):
  """Dispatches args to the matching CMD* subcommand handler."""
  return subcommand.CommandDispatcher(__name__).execute(
      OptionParserSwarming(version=__version__), args)
maruel@chromium.org | 0437a73 | 2013-08-27 16:05:52 +0000 | [diff] [blame] | 1365 | |
| 1366 | |
if __name__ == '__main__':
  # Project helper; presumably normalizes stdout/stderr encoding so unicode
  # output does not crash on misconfigured terminals — confirm in utils.
  fix_encoding.fix_encoding()
  # Project helper; presumably disables output buffering so logs interleave
  # correctly when piped — confirm in utils.
  tools.disable_buffering()
  # Enables ANSI escape code handling (colors) on Windows consoles.
  colorama.init()
  sys.exit(main(sys.argv[1:]))