blob: b88931fde8e118b8d7116549e1f29d9bb0fbb65c [file] [log] [blame]
maruel@chromium.org0437a732013-08-27 16:05:52 +00001#!/usr/bin/env python
Marc-Antoine Ruel8add1242013-11-05 17:28:27 -05002# Copyright 2013 The Swarming Authors. All rights reserved.
Marc-Antoine Ruele98b1122013-11-05 20:27:57 -05003# Use of this source code is governed under the Apache License, Version 2.0 that
4# can be found in the LICENSE file.
maruel@chromium.org0437a732013-08-27 16:05:52 +00005
6"""Client tool to trigger tasks or retrieve results from a Swarming server."""
7
marueld8aba222015-09-03 12:21:19 -07008__version__ = '0.6.3'
maruel@chromium.org0437a732013-08-27 16:05:52 +00009
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -050010import collections
Marc-Antoine Ruel5e6ccdb2015-04-02 15:55:13 -040011import datetime
maruel@chromium.org0437a732013-08-27 16:05:52 +000012import json
13import logging
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -040014import optparse
maruel@chromium.org0437a732013-08-27 16:05:52 +000015import os
marueld8aba222015-09-03 12:21:19 -070016import re
17import shutil
18import StringIO
maruel@chromium.org0437a732013-08-27 16:05:52 +000019import subprocess
20import sys
Vadim Shtayurab19319e2014-04-27 08:50:06 -070021import threading
maruel@chromium.org0437a732013-08-27 16:05:52 +000022import time
23import urllib
marueld8aba222015-09-03 12:21:19 -070024import urlparse
25import zipfile
maruel@chromium.org0437a732013-08-27 16:05:52 +000026
27from third_party import colorama
28from third_party.depot_tools import fix_encoding
29from third_party.depot_tools import subcommand
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000030
Marc-Antoine Ruel8806e622014-02-12 14:15:53 -050031from utils import file_path
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -040032from utils import logging_utils
Marc-Antoine Ruel819fb162014-03-12 16:38:26 -040033from third_party.chromium import natsort
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000034from utils import net
Marc-Antoine Ruelcfb60852014-07-02 15:22:00 -040035from utils import on_error
maruel@chromium.org0437a732013-08-27 16:05:52 +000036from utils import threading_utils
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000037from utils import tools
marueld8aba222015-09-03 12:21:19 -070038from utils import zip_package
maruel@chromium.org0437a732013-08-27 16:05:52 +000039
Vadim Shtayurae34e13a2014-02-02 11:23:26 -080040import auth
Marc-Antoine Ruel8bee66d2014-08-28 19:02:07 -040041import isolated_format
maruel@chromium.org7b844a62013-09-17 13:04:59 +000042import isolateserver
marueld8aba222015-09-03 12:21:19 -070043import run_isolated
maruel@chromium.org0437a732013-08-27 16:05:52 +000044
45
# Absolute path of the directory containing this script. Used to locate the
# sibling isolate.py tool and to zip-bundle run_isolated for the bot.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -050047
48
class Failure(Exception):
  """Generic failure raised by operations in this tool."""
52
53
54### Isolated file handling.
55
56
def isolated_upload_zip_bundle(isolate_server, bundle):
  """Uploads a zip package to Isolate Server and returns raw fetch URL.

  Args:
    isolate_server: URL of an Isolate Server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from.
  """
  # The swarming bot fetches this file with a plain HTTPS GET. The 'default'
  # namespace is used so the server hands back raw (uncompressed) bytes, since
  # swarming_bot can't decompress. This namespace is unrelated to the
  # |namespace| passed to run_isolated.py for the isolated task's files.
  logging.info('Zipping up and uploading files...')
  started = time.time()
  item = isolateserver.BufferItem(bundle.zip_into_buffer())
  with isolateserver.get_storage(isolate_server, 'default') as storage:
    pushed = storage.upload_items([item])
    fetch_url = storage.get_fetch_url(item)
  took = time.time() - started
  if item in pushed:
    logging.info('Upload complete, time elapsed: %f', took)
  else:
    logging.info('Zip file already on server, time elapsed: %f', took)
  return fetch_url
84
85
def isolated_get_data(isolate_server):
  """Returns the 'data' section with all files necessary to bootstrap a task
  execution running an isolated task.

  It's mainly zipping run_isolated.zip over and over again.
  TODO(maruel): Get rid of this with.
  https://code.google.com/p/swarming/issues/detail?id=173
  """
  pkg = zip_package.ZipPackage(ROOT_DIR)
  # Embed a self-contained run_isolated.zip inside the bundle, uncompressed.
  inner = run_isolated.get_as_zip_package().zip_into_buffer(compress=False)
  pkg.add_buffer('run_isolated.zip', inner)
  return [(isolated_upload_zip_bundle(isolate_server, pkg), 'swarm_data.zip')]
100
101
def isolated_get_run_commands(
    isolate_server, namespace, isolated_hash, extra_args, verbose):
  """Returns the 'commands' to run an isolated task via run_isolated.zip.

  Args:
    isolate_server: URL of the Isolate Server holding the task files.
    namespace: namespace the isolated content lives in.
    isolated_hash: digest of the .isolated file to run.
    extra_args: optional arguments forwarded to the task's command.
    verbose: if True, run_isolated runs with --verbose.

  Returns:
    commands list to be added to the request.
  """
  cmd = ['python', 'run_isolated.zip']
  cmd += ['--isolated', isolated_hash]
  cmd += ['--isolate-server', isolate_server]
  cmd += ['--namespace', namespace]
  if verbose:
    cmd.append('--verbose')
  # Everything after '--' is handed by run_isolated.py to the task command.
  if extra_args:
    cmd.append('--')
    cmd.extend(extra_args)
  return cmd
122
123
def isolated_archive(isolate_server, namespace, isolated, algo, verbose):
  """Archives a .isolated and all the dependencies on the Isolate Server.

  Shells out to the sibling isolate.py tool to do the actual upload.

  Args:
    isolate_server: URL of the Isolate Server to upload to.
    namespace: namespace to store the content in.
    isolated: path to the .isolated file to archive.
    algo: hashing algorithm to digest the archived .isolated with.
    verbose: verbosity count; each unit adds one --verbose flag.

  Returns:
    Hex digest of the archived .isolated file, or None on failure.
  """
  logging.info(
      'isolated_archive(%s, %s, %s)', isolate_server, namespace, isolated)
  print('Archiving: %s' % isolated)
  cmd = [
    sys.executable,
    os.path.join(ROOT_DIR, 'isolate.py'),
    'archive',
    '--isolate-server', isolate_server,
    '--namespace', namespace,
    '--isolated', isolated,
  ]
  cmd.extend(['--verbose'] * verbose)
  logging.info(' '.join(cmd))
  # Bug fix: 'verbose' was previously passed as subprocess.call()'s second
  # positional parameter, which is bufsize, not verbosity. It does not belong
  # in the call at all; verbosity is already encoded in cmd above.
  if subprocess.call(cmd):
    return None
  return isolated_format.hash_file(isolated, algo)
142
143
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
  """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file (True)
  or a hash (False).
  """
  # A path to a .isolated file: archive it first, then trigger by digest.
  if arg.endswith('.isolated'):
    digest = isolated_archive(isolate_server, namespace, arg, algo, verbose)
    if digest:
      return digest, True
    on_error.report('Archival failure %s' % arg)
    return None, True
  # Otherwise the argument must already be a valid content digest.
  if isolated_format.is_valid_hash(arg, algo):
    return arg, False
  on_error.report('Invalid hash %s' % arg)
  return None, False
161
162
def isolated_handle_options(options, args):
  """Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.

  Normalizes options.isolated (archiving a .isolated file if a path was
  given), derives options.task_name when missing, and uploads the
  run_isolated bootstrap bundle.

  Returns:
    tuple(command, data).

  Raises:
    ValueError: on malformed arguments or if the bootstrap upload fails.
  """
  isolated_cmd_args = []
  # Bug fix: initialized up-front. Previously, when --isolated was given with
  # no positional args and no --task-name, the task-name block below read
  # 'is_file' before any assignment and raised UnboundLocalError.
  is_file = False
  if not options.isolated:
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      args = args[:index]
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args[1:]
      args = args[:1]
    if len(args) != 1:
      raise ValueError(
          'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
          'process.')
    # Old code. To be removed eventually.
    options.isolated, is_file = isolated_to_hash(
        options.isolate_server, options.namespace, args[0],
        isolated_format.get_hash_algo(options.namespace), options.verbose)
    if not options.isolated:
      raise ValueError('Invalid argument %s' % args[0])
  elif args:
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      if index != 0:
        raise ValueError('Unexpected arguments.')
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args

  command = isolated_get_run_commands(
      options.isolate_server, options.namespace, options.isolated,
      isolated_cmd_args, options.verbose)

  # If a file name was passed, use its base name of the isolated hash.
  # Otherwise, use user name as an approximation of a task name.
  if not options.task_name:
    if is_file:
      key = os.path.splitext(os.path.basename(args[0]))[0]
    else:
      key = options.user
    options.task_name = u'%s/%s/%s' % (
        key,
        '_'.join(
            '%s=%s' % (k, v)
            for k, v in sorted(options.dimensions.iteritems())),
        options.isolated)

  try:
    data = isolated_get_data(options.isolate_server)
  except (IOError, OSError):
    on_error.report('Failed to upload the zip file')
    raise ValueError('Failed to upload the zip file')

  return command, data
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -0500225
226
227### Triggering.
228
229
# Everything needed to build one Swarming task request; converted to the
# server's wire format by task_request_to_raw_request().
TaskRequest = collections.namedtuple('TaskRequest', [
    'command',
    'data',
    'dimensions',
    'env',
    'expiration',
    'hard_timeout',
    'idempotent',
    'io_timeout',
    'name',
    'priority',
    'tags',
    'user',
    'verbose',
])
247
248
def task_request_to_raw_request(task_request):
  """Returns the json dict expected by the Swarming server for new request.

  This is for the v1 client Swarming API.
  """
  properties = {
    'commands': [task_request.command],
    'data': task_request.data,
    'dimensions': task_request.dimensions,
    'env': task_request.env,
    'execution_timeout_secs': task_request.hard_timeout,
    'io_timeout_secs': task_request.io_timeout,
    'idempotent': task_request.idempotent,
  }
  return {
    'name': task_request.name,
    # Propagates the parent task if this client runs inside a Swarming task.
    'parent_task_id': os.environ.get('SWARMING_TASK_ID', ''),
    'priority': task_request.priority,
    'properties': properties,
    'scheduling_expiration_secs': task_request.expiration,
    'tags': task_request.tags,
    'user': task_request.user,
  }
271
272
def swarming_handshake(swarming):
  """Initiates the connection to the Swarming server.

  Returns:
    XSRF token string on success, None on failure.
  """
  response = net.url_read_json(
      swarming + '/swarming/api/v1/client/handshake',
      headers={'X-XSRF-Token-Request': '1'},
      data={})
  if not response:
    logging.error('Failed to handshake with server')
    return None
  logging.info('Connected to server version: %s', response['server_version'])
  return response['xsrf_token']
285
286
def swarming_trigger(swarming, raw_request, xsrf_token):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info('Triggering: %s', raw_request['name'])
  result = net.url_read_json(
      swarming + '/swarming/api/v1/client/request',
      data=raw_request,
      headers={'X-XSRF-Token': xsrf_token})
  if result:
    return result
  on_error.report('Failed to trigger task %s' % raw_request['name'])
  return None
312
313
def setup_googletest(env, shards, index):
  """Sets googletest specific environment variables.

  Returns the original mapping untouched for a single shard; otherwise a copy
  with GTEST_SHARD_INDEX/GTEST_TOTAL_SHARDS added.
  """
  if shards <= 1:
    return env
  sharded = env.copy()
  sharded['GTEST_SHARD_INDEX'] = str(index)
  sharded['GTEST_TOTAL_SHARDS'] = str(shards)
  return sharded
321
322
def trigger_task_shards(swarming, task_request, shards):
  """Triggers one or many subtasks of a sharded task.

  Args:
    swarming: URL of the Swarming server.
    task_request: TaskRequest namedtuple describing the task.
    shards: number of shards to trigger; each gets googletest sharding env
        vars and a ':<index>:<shards>' suffixed name when shards > 1.

  Returns:
    Dict with task details, returned to caller as part of --dump-json output.
    None in case of failure.
  """
  def convert(index):
    # Derives the per-shard raw request; the shared task_request is only
    # copied (via _replace) when sharding actually applies.
    req = task_request
    if shards > 1:
      req = req._replace(
          env=setup_googletest(req.env, shards, index),
          name='%s:%s:%s' % (req.name, index, shards))
    return task_request_to_raw_request(req)

  requests = [convert(index) for index in xrange(shards)]
  # A single XSRF token is reused for all shard trigger calls.
  xsrf_token = swarming_handshake(swarming)
  if not xsrf_token:
    return None
  tasks = {}
  priority_warning = False
  for index, request in enumerate(requests):
    task = swarming_trigger(swarming, request, xsrf_token)
    if not task:
      break
    logging.info('Request result: %s', task)
    # The server may clamp the priority; warn the user once if it did.
    if (not priority_warning and
        task['request']['priority'] != task_request.priority):
      priority_warning = True
      print >> sys.stderr, (
          'Priority was reset to %s' % task['request']['priority'])
    tasks[request['name']] = {
      'shard_index': index,
      'task_id': task['task_id'],
      'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
    }

  # Some shards weren't triggered. Abort everything.
  if len(tasks) != len(requests):
    if tasks:
      print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
          len(tasks), len(requests))
    for task_dict in tasks.itervalues():
      abort_task(swarming, task_dict['task_id'])
    return None

  return tasks
370
371
372### Collection.
maruel@chromium.org0437a732013-08-27 16:05:52 +0000373
374
# How often to print status updates to stdout in 'collect', in seconds
# (15 minutes). Also used as the results channel pull timeout in
# yield_results().
STATUS_UPDATE_INTERVAL = 15 * 60.
377
Marc-Antoine Ruel2f6581a2014-10-03 11:09:53 -0400378
class State(object):
  """States in which a task can be.

  WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
  values are part of the API so if they change, the API changed.

  It's in fact an enum. Values should be in decreasing order of importance.
  """
  RUNNING = 0x10
  PENDING = 0x20
  EXPIRED = 0x30
  TIMED_OUT = 0x40
  BOT_DIED = 0x50
  CANCELED = 0x60
  COMPLETED = 0x70

  # Aggregate views over the values above; STATES preserves the declaration
  # order since it concatenates the two disjoint groups in order.
  STATES_RUNNING = (RUNNING, PENDING)
  STATES_NOT_RUNNING = (EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
  STATES = STATES_RUNNING + STATES_NOT_RUNNING
  STATES_DONE = (TIMED_OUT, COMPLETED)
  STATES_ABANDONED = (EXPIRED, BOT_DIED, CANCELED)

  # Human readable names, keyed by state value.
  _NAMES = {
    RUNNING: 'Running',
    PENDING: 'Pending',
    EXPIRED: 'Expired',
    TIMED_OUT: 'Execution timed out',
    BOT_DIED: 'Bot died',
    CANCELED: 'User canceled',
    COMPLETED: 'Completed',
  }

  @classmethod
  def to_string(cls, state):
    """Returns a user-readable string representing a State."""
    name = cls._NAMES.get(state)
    if name is None:
      raise ValueError('Invalid state %s' % state)
    return name
maruel@chromium.org0437a732013-08-27 16:05:52 +0000417
418
class TaskOutputCollector(object):
  """Assembles task execution summary (for --task-summary-json output).

  Optionally fetches task outputs from isolate server to local disk (used when
  --task-output-dir is passed).

  This object is shared among multiple threads running 'retrieve_results'
  function, in particular they call 'process_shard_result' method in parallel.
  """

  def __init__(self, task_output_dir, task_name, shard_count):
    """Initializes TaskOutputCollector, ensures |task_output_dir| exists.

    Args:
      task_output_dir: (optional) local directory to put fetched files to.
      task_name: name of the swarming task results belong to.
      shard_count: expected number of task shards.
    """
    self.task_output_dir = task_output_dir
    self.task_name = task_name
    self.shard_count = shard_count

    # Guards _per_shard_results and _storage; see process_shard_result.
    self._lock = threading.Lock()
    # Maps shard index -> result dict, filled in as shards complete.
    self._per_shard_results = {}
    # Lazily created isolateserver.Storage shared by all shards.
    self._storage = None

    if self.task_output_dir and not os.path.isdir(self.task_output_dir):
      os.makedirs(self.task_output_dir)

  def process_shard_result(self, shard_index, result):
    """Stores results of a single task shard, fetches output files if necessary.

    Modifies |result| in place.

    Called concurrently from multiple threads.
    """
    # Sanity check index is in expected range.
    assert isinstance(shard_index, int)
    if shard_index < 0 or shard_index >= self.shard_count:
      logging.warning(
          'Shard index %d is outside of expected range: [0; %d]',
          shard_index, self.shard_count - 1)
      return

    # Each shard is expected to yield at most one run_isolated_out_hack
    # marker across all its outputs; more than one is a malformed result.
    assert not 'isolated_out' in result
    result['isolated_out'] = None
    for output in result['outputs']:
      isolated_files_location = extract_output_files_location(output)
      if isolated_files_location:
        if result['isolated_out']:
          raise ValueError('Unexpected two task with output')
        result['isolated_out'] = isolated_files_location

    # Store result dict of that shard, ignore results we've already seen.
    with self._lock:
      if shard_index in self._per_shard_results:
        logging.warning('Ignoring duplicate shard index %d', shard_index)
        return
      self._per_shard_results[shard_index] = result

    # Fetch output files if necessary.
    # NOTE: done outside the lock; only _get_storage re-acquires it.
    if self.task_output_dir and result['isolated_out']:
      storage = self._get_storage(
          result['isolated_out']['server'],
          result['isolated_out']['namespace'])
      if storage:
        # Output files are supposed to be small and they are not reused across
        # tasks. So use MemoryCache for them instead of on-disk cache. Make
        # files writable, so that calling script can delete them.
        isolateserver.fetch_isolated(
            result['isolated_out']['hash'],
            storage,
            isolateserver.MemoryCache(file_mode_mask=0700),
            os.path.join(self.task_output_dir, str(shard_index)),
            False)

  def finalize(self):
    """Assembles and returns task summary JSON, shutdowns underlying Storage."""
    with self._lock:
      # Write an array of shard results with None for missing shards.
      summary = {
        'shards': [
          self._per_shard_results.get(i) for i in xrange(self.shard_count)
        ],
      }
      # Write summary.json to task_output_dir as well.
      if self.task_output_dir:
        tools.write_json(
            os.path.join(self.task_output_dir, 'summary.json'),
            summary,
            False)
      if self._storage:
        self._storage.close()
        self._storage = None
      return summary

  def _get_storage(self, isolate_server, namespace):
    """Returns isolateserver.Storage to use to fetch files."""
    # Only meaningful when fetching outputs to disk was requested.
    assert self.task_output_dir
    with self._lock:
      if not self._storage:
        self._storage = isolateserver.get_storage(isolate_server, namespace)
      else:
        # Shards must all use exact same isolate server and namespace.
        if self._storage.location != isolate_server:
          logging.error(
              'Task shards are using multiple isolate servers: %s and %s',
              self._storage.location, isolate_server)
          return None
        if self._storage.namespace != namespace:
          logging.error(
              'Task shards are using multiple namespaces: %s and %s',
              self._storage.namespace, namespace)
          return None
      return self._storage
534
535
def extract_output_files_location(task_log):
  """Task log -> location of task output files to fetch.

  TODO(vadimsh,maruel): Use side-channel to get this information.
  See 'run_tha_test' in run_isolated.py for where the data is generated.

  Returns:
    Dict with 'hash', 'namespace', 'server' and 'view_url' keys on success.
    None if information is missing or can not be parsed.
  """
  if not task_log:
    return None
  match = re.search(
      r'\[run_isolated_out_hack\](.*)\[/run_isolated_out_hack\]',
      task_log,
      re.DOTALL)
  if not match:
    return None

  def to_ascii(val):
    # Reject non-string values and anything outside plain ASCII.
    if not isinstance(val, basestring):
      raise ValueError()
    return val.encode('ascii')

  try:
    parsed = json.loads(match.group(1))
    if not isinstance(parsed, dict):
      raise ValueError()
    digest = to_ascii(parsed['hash'])
    ns = to_ascii(parsed['namespace'])
    server = to_ascii(parsed['storage'])
    if not file_path.is_url(server):
      raise ValueError()
    query = urllib.urlencode([('namespace', ns), ('hash', digest)])
    return {
      'hash': digest,
      'namespace': ns,
      'server': server,
      'view_url': '%s/browse?%s' % (server, query),
    }
  except (KeyError, ValueError):
    logging.warning(
        'Unexpected value of run_isolated_out_hack: %s', match.group(1))
    return None
581
582
def now():
  """Returns the current time as time.time() does.

  Exists so it can be mocked easily in tests.
  """
  return time.time()
586
587
def retrieve_results(
    base_url, shard_index, task_id, timeout, should_stop, output_collector):
  """Retrieves results for a single task ID.

  Polls the server until the task reaches a not-running state, the deadline
  passes, or |should_stop| is set.

  Args:
    base_url: URL of the Swarming server.
    shard_index: index of this shard, forwarded to output_collector.
    task_id: ID of the task to poll.
    timeout: seconds to wait in total; falsy means wait forever.
    should_stop: threading.Event used to abort the wait early.
    output_collector: optional TaskOutputCollector fed the final result.

  Returns:
    <result dict> on success.
    None on failure.
  """
  assert isinstance(timeout, float), timeout
  result_url = '%s/swarming/api/v1/client/task/%s' % (base_url, task_id)
  output_url = '%s/swarming/api/v1/client/task/%s/output/all' % (
      base_url, task_id)
  started = now()
  deadline = started + timeout if timeout else None
  attempt = 0

  while not should_stop.is_set():
    attempt += 1

    # Waiting for too long -> give up.
    current_time = now()
    if deadline and current_time >= deadline:
      logging.error('retrieve_results(%s) timed out on attempt %d',
          base_url, attempt)
      return None

    # Do not spin too fast. Spin faster at the beginning though.
    # Start with 1 sec delay and for each 30 sec of waiting add another second
    # of delay, until hitting 15 sec ceiling.
    if attempt > 1:
      max_delay = min(15, 1 + (current_time - started) / 30.0)
      delay = min(max_delay, deadline - current_time) if deadline else max_delay
      if delay > 0:
        logging.debug('Waiting %.1f sec before retrying', delay)
        # Sleeping on the event (not time.sleep) lets a stop request wake us.
        should_stop.wait(delay)
        if should_stop.is_set():
          return None

    # Disable internal retries in net.url_read_json, since we are doing retries
    # ourselves.
    # TODO(maruel): We'd need to know if it's a 404 and not retry at all.
    result = net.url_read_json(result_url, retry_50x=False)
    if not result:
      continue
    if result['state'] in State.STATES_NOT_RUNNING:
      # Task is done; attach its console output before returning.
      out = net.url_read_json(output_url)
      result['outputs'] = (out or {}).get('outputs', [])
      if not result['outputs']:
        logging.error('No output found for task %s', task_id)
      # Record the result, try to fetch attached output files (if any).
      if output_collector:
        # TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
        output_collector.process_shard_result(shard_index, result)
      return result
maruel@chromium.org0437a732013-08-27 16:05:52 +0000642
643
def yield_results(
    swarm_base_url, task_ids, timeout, max_threads, print_status_updates,
    output_collector):
  """Yields swarming task results from the swarming server as (index, result).

  Duplicate shards are ignored. Shards are yielded in order of completion.
  Timed out shards are NOT yielded at all. Caller can compare number of yielded
  shards with len(task_keys) to verify all shards completed.

  max_threads is optional and is used to limit the number of parallel fetches
  done. Since in general the number of task_keys is in the range <=10, it's not
  worth normally to limit the number threads. Mostly used for testing purposes.

  output_collector is an optional instance of TaskOutputCollector that will be
  used to fetch files produced by a task from isolate server to the local disk.

  Yields:
    (index, result). In particular, 'result' is defined as the
    GetRunnerResults() function in services/swarming/server/test_runner.py.
  """
  # One polling thread per shard (capped by max_threads when provided).
  number_threads = (
      min(max_threads, len(task_ids)) if max_threads else len(task_ids))
  should_stop = threading.Event()
  results_channel = threading_utils.TaskChannel()

  with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool:
    try:
      # Adds a task to the thread pool to call 'retrieve_results' and return
      # the results together with shard_index that produced them (as a tuple).
      def enqueue_retrieve_results(shard_index, task_id):
        # Defined in its own function so the lambda captures this iteration's
        # shard_index by value rather than the loop variable.
        task_fn = lambda *args: (shard_index, retrieve_results(*args))
        pool.add_task(
            0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index,
            task_id, timeout, should_stop, output_collector)

      # Enqueue 'retrieve_results' calls for each shard key to run in parallel.
      for shard_index, task_id in enumerate(task_ids):
        enqueue_retrieve_results(shard_index, task_id)

      # Wait for all of them to finish.
      shards_remaining = range(len(task_ids))
      active_task_count = len(task_ids)
      while active_task_count:
        shard_index, result = None, None
        try:
          shard_index, result = results_channel.pull(
              timeout=STATUS_UPDATE_INTERVAL)
        except threading_utils.TaskChannel.Timeout:
          # No shard finished within the interval; optionally tell the user
          # which shards are still pending, then keep waiting.
          if print_status_updates:
            print(
                'Waiting for results from the following shards: %s' %
                ', '.join(map(str, shards_remaining)))
            sys.stdout.flush()
          continue
        except Exception:
          logging.exception('Unexpected exception in retrieve_results')

        # A call to 'retrieve_results' finished (successfully or not).
        active_task_count -= 1
        if not result:
          logging.error('Failed to retrieve the results for a swarming key')
          continue

        # Yield back results to the caller.
        assert shard_index in shards_remaining
        shards_remaining.remove(shard_index)
        yield shard_index, result

    finally:
      # Done or aborted with Ctrl+C, kill the remaining threads.
      should_stop.set()
715
716
def decorate_shard_output(swarming, shard_index, metadata):
  """Returns wrapped output for swarming task shard.

  Frames the shard's console output between boxed header and footer lines
  carrying the task URL and timing/exit summary.
  """
  def parse_ts(value):
    return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')

  # Pending time = started - created; only computable once the task started.
  if metadata.get('started_ts'):
    wait = parse_ts(metadata['started_ts']) - parse_ts(metadata['created_ts'])
    pending = '%.1fs' % wait.total_seconds()
  else:
    pending = 'N/A'
  duration = (
      '%.1fs' % metadata['durations'][0] if metadata.get('durations')
      else 'N/A')
  exit_code = (
      '%d' % metadata['exit_codes'][0] if metadata.get('exit_codes')
      else 'N/A')
  bot_id = metadata.get('bot_id') or 'N/A'

  task_url = '%s/user/task/%s' % (swarming, metadata['id'])
  head_text = 'Shard %d %s' % (shard_index, task_url)
  foot_text = (
      'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % (
          shard_index, pending, duration, bot_id, exit_code))

  # Box both banners to the width of the longer one.
  width = max(len(head_text), len(foot_text))
  rule = '+-%s-+\n' % ('-' * width)
  head_line = '| %s |\n' % head_text.ljust(width)
  foot_line = '| %s |\n' % foot_text.ljust(width)

  body = '\n'.join(o for o in metadata['outputs'] if o).rstrip() + '\n'
  return (
      rule + head_line + rule +
      body +
      rule + foot_line + rule[:-1])
maruel@chromium.org0437a732013-08-27 16:05:52 +0000754
755
def collect(
    swarming, task_name, task_ids, timeout, decorate, print_status_updates,
    task_summary_json, task_output_dir):
  """Retrieves results of a Swarming task.

  Returns the process exit code to report to the user: the last non-zero
  shard exit code seen, 1 when a shard has no exit code at all (the process
  never started) or when some shards are missing from the results, 0 on
  success.
  """
  # Collect summary JSON and output files (if task_output_dir is not None).
  output_collector = TaskOutputCollector(
      task_output_dir, task_name, len(task_ids))

  seen_shards = set()
  exit_code = 0
  total_duration = 0
  try:
    for index, metadata in yield_results(
        swarming, task_ids, timeout, None, print_status_updates,
        output_collector):
      seen_shards.add(index)

      # Default to failure if there was no process that even started.
      shard_exit_code = 1
      if metadata.get('exit_codes'):
        shard_exit_code = metadata['exit_codes'][0]
      if shard_exit_code:
        exit_code = shard_exit_code
      if metadata.get('durations'):
        total_duration += metadata['durations'][0]

      if decorate:
        print(decorate_shard_output(swarming, index, metadata))
        if len(seen_shards) < len(task_ids):
          print('')
      else:
        # BUG FIX: only compute a value for display here. Previously this
        # branch reassigned |exit_code| itself, so a passing shard (or one
        # without exit codes, yielding the string 'N/A') reported after a
        # failing shard clobbered the accumulated failure returned to the
        # caller.
        if metadata.get('exit_codes'):
          shown_exit_code = metadata['exit_codes'][0]
        else:
          shown_exit_code = 'N/A'
        print('%s: %s %s' %
            (metadata.get('bot_id') or 'N/A', metadata['id'],
             shown_exit_code))
        for output in metadata['outputs']:
          if not output:
            continue
          output = output.rstrip()
          if output:
            print(''.join('  %s\n' % l for l in output.splitlines()))
  finally:
    # Always finalize the collector so the summary reflects whatever was
    # retrieved, even on Ctrl+C or a retrieval error.
    summary = output_collector.finalize()
    if task_summary_json:
      tools.write_json(task_summary_json, summary, False)

  if decorate and total_duration:
    print('Total duration: %.1fs' % total_duration)

  if len(seen_shards) != len(task_ids):
    missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards]
    print >> sys.stderr, ('Results from some shards are missing: %s' %
        ', '.join(map(str, missing_shards)))
    return 1

  return exit_code
maruel@chromium.org0437a732013-08-27 16:05:52 +0000814
815
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -0500816### Commands.
817
818
def abort_task(_swarming, _manifest):
  """Given a task manifest that was triggered, aborts its execution.

  Currently a no-op; the parameters are deliberately unused (hence the
  leading underscores) because the server does not expose an abort endpoint
  yet.
  """
  # TODO(vadimsh): Not supported by the server yet.
822
823
def add_filter_options(parser):
  """Adds the bot-dimension filtering option group to |parser|.

  The group is also exposed as parser.filter_group so callers can append
  extra filtering options to it.
  """
  group = optparse.OptionGroup(parser, 'Filtering slaves')
  group.add_option(
      '-d', '--dimension', default=[], action='append', nargs=2,
      dest='dimensions', metavar='FOO bar',
      help='dimension to filter on')
  parser.filter_group = group
  parser.add_option_group(group)
831
Marc-Antoine Ruel819fb162014-03-12 16:38:26 -0400832
def add_sharding_options(parser):
  """Adds the --shards option group to |parser|.

  The group is also exposed as parser.sharding_group.
  """
  group = optparse.OptionGroup(parser, 'Sharding options')
  group.add_option(
      '--shards', type='int', default=1,
      help='Number of shards to trigger and collect.')
  parser.sharding_group = group
  parser.add_option_group(group)
839
840
def add_trigger_options(parser):
  """Adds all options to trigger a task on Swarming."""
  isolateserver.add_isolate_server_options(parser)
  add_filter_options(parser)

  group = optparse.OptionGroup(parser, 'Task properties')
  group.add_option(
      '-s', '--isolated',
      help='Hash of the .isolated to grab from the isolate server')
  group.add_option(
      '-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar',
      help='Environment variables to set')
  group.add_option(
      '--priority', type='int', default=100,
      help='The lower value, the more important the task is')
  group.add_option(
      '-T', '--task-name',
      help='Display name of the task. Defaults to '
           '<base_name>/<dimensions>/<isolated hash>/<timestamp> if an '
           'isolated file is provided, if a hash is provided, it defaults to '
           '<user>/<dimensions>/<isolated hash>/<timestamp>')
  group.add_option(
      '--tags', action='append', default=[],
      help='Tags to assign to the task.')
  group.add_option(
      '--user', default='',
      help='User associated with the task. Defaults to authenticated user on '
           'the server.')
  group.add_option(
      '--idempotent', action='store_true', default=False,
      help='When set, the server will actively try to find a previous task '
           'with the same parameter and return this result instead if possible')
  group.add_option(
      '--expiration', type='int', default=6*60*60,
      help='Seconds to allow the task to be pending for a bot to run before '
           'this task request expires.')
  # Deprecated alias of --expiration, kept hidden for compatibility.
  group.add_option(
      '--deadline', type='int', dest='expiration',
      help=optparse.SUPPRESS_HELP)
  group.add_option(
      '--hard-timeout', type='int', default=60*60,
      help='Seconds to allow the task to complete.')
  group.add_option(
      '--io-timeout', type='int', default=20*60,
      help='Seconds to allow the task to be silent.')
  group.add_option(
      '--raw-cmd', action='store_true', default=False,
      help='When set, the command after -- is used as-is without run_isolated. '
           'In this case, no .isolated file is expected.')
  parser.task_group = group
  parser.add_option_group(group)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000891
892
def process_trigger_options(parser, options, args):
  """Processes trigger options and uploads files to isolate server if necessary.
  """
  options.dimensions = dict(options.dimensions)
  options.env = dict(options.env)

  data = []
  if not options.dimensions:
    parser.error('Please at least specify one --dimension')
  if options.raw_cmd:
    # Raw mode: whatever follows -- is the command itself; the isolate server
    # is not involved at all.
    if not args:
      parser.error(
          'Arguments with --raw-cmd should be passed after -- as command '
          'delimiter.')
    if options.isolate_server:
      parser.error('Can\'t use both --raw-cmd and --isolate-server.')

    command = args
    if not options.task_name:
      # Derive a default task name from the user and the sorted dimensions.
      dims = '_'.join(
          '%s=%s' % (k, v)
          for k, v in sorted(options.dimensions.iteritems()))
      options.task_name = u'%s/%s' % (options.user, dims)
  else:
    # Isolated mode: resolve/upload the .isolated and compute the command
    # that will run it.
    isolateserver.process_isolate_server_options(parser, options, False)
    try:
      command, data = isolated_handle_options(options, args)
    except ValueError as exc:
      parser.error(str(exc))

  return TaskRequest(
      command=command,
      data=data,
      dimensions=options.dimensions,
      env=options.env,
      expiration=options.expiration,
      hard_timeout=options.hard_timeout,
      idempotent=options.idempotent,
      io_timeout=options.io_timeout,
      name=options.task_name,
      priority=options.priority,
      tags=options.tags,
      user=options.user,
      verbose=options.verbose)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000938
939
def add_collect_options(parser):
  """Adds the options used while waiting for task results."""
  parser.server_group.add_option(
      '-t', '--timeout',
      type='float',
      default=80*60.,
      help='Timeout to wait for result, set to 0 for no timeout; default: '
           '%default s')
  parser.group_logging.add_option(
      '--decorate', action='store_true', help='Decorate output')
  parser.group_logging.add_option(
      '--print-status-updates', action='store_true',
      help='Print periodic status updates')
  output_group = optparse.OptionGroup(parser, 'Task output')
  output_group.add_option(
      '--task-summary-json',
      metavar='FILE',
      help='Dump a summary of task results to this file as json. It contains '
           'only shards statuses as know to server directly. Any output files '
           'emitted by the task can be collected by using --task-output-dir')
  output_group.add_option(
      '--task-output-dir',
      metavar='DIR',
      help='Directory to put task results into. When the task finishes, this '
           'directory contains per-shard directory with output files produced '
           'by shards: <task-output-dir>/<zero-based-shard-index>/.')
  parser.task_output_group = output_group
  parser.add_option_group(output_group)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000966
967
@subcommand.usage('bots...')
def CMDbot_delete(parser, args):
  """Forcibly deletes bots from the Swarming server."""
  parser.add_option(
      '-f', '--force', action='store_true',
      help='Do not prompt for confirmation')
  options, args = parser.parse_args(args)
  if not args:
    parser.error('Please specific bots to delete')

  bots = sorted(args)
  # Unless --force is given, show the list and ask the user to confirm.
  if not options.force:
    print('Delete the following bots?')
    for bot in bots:
      print('  %s' % bot)
    answer = raw_input('Continue? [y/N] ')
    if answer not in ('y', 'Y'):
      print('Goodbye.')
      return 1

  # Delete each bot in turn; a failure is reported in the exit code but does
  # not stop the remaining deletions.
  failed = False
  for bot in bots:
    url = '%s/swarming/api/v1/client/bot/%s' % (options.swarming, bot)
    if net.url_read_json(url, method='DELETE') is None:
      print('Deleting %s failed' % bot)
      failed = True
  return int(failed)
994
995
def CMDbots(parser, args):
  """Returns information about the bots connected to the Swarming server."""
  add_filter_options(parser)
  parser.filter_group.add_option(
      '--dead-only', action='store_true',
      help='Only print dead bots, useful to reap them and reimage broken bots')
  parser.filter_group.add_option(
      '-k', '--keep-dead', action='store_true',
      help='Do not filter out dead bots')
  parser.filter_group.add_option(
      '-b', '--bare', action='store_true',
      help='Do not print out dimensions')
  options, args = parser.parse_args(args)

  # The two flags contradict each other: one shows only dead bots, the other
  # includes them along with the live ones.
  if options.keep_dead and options.dead_only:
    parser.error('Use only one of --keep-dead and --dead-only')

  bots = []
  cursor = None
  limit = 250
  # Fetch the full bot list, iterating via server-provided cursors until the
  # server returns an empty cursor.
  base_url = options.swarming + '/swarming/api/v1/client/bots?limit=%d' % limit
  while True:
    url = base_url
    if cursor:
      url += '&cursor=%s' % urllib.quote(cursor)
    data = net.url_read_json(url)
    if data is None:
      print >> sys.stderr, 'Failed to access %s' % options.swarming
      return 1
    bots.extend(data['items'])
    cursor = data['cursor']
    if not cursor:
      break

  # Natural sort so e.g. bot2 comes before bot10.
  for bot in natsort.natsorted(bots, key=lambda x: x['id']):
    # Dead/alive filtering: --dead-only keeps only dead bots; by default dead
    # bots are skipped unless --keep-dead is given.
    if options.dead_only:
      if not bot['is_dead']:
        continue
    elif not options.keep_dead and bot['is_dead']:
      continue

    # If the user requested to filter on dimensions, ensure the bot has all the
    # dimensions requested.
    dimensions = bot['dimensions']
    for key, value in options.dimensions:
      if key not in dimensions:
        break
      # A bot can have multiple value for a key, for example,
      # {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will
      # be accepted.
      if isinstance(dimensions[key], list):
        if value not in dimensions[key]:
          break
      else:
        if value != dimensions[key]:
          break
    else:
      # for/else: only reached when no 'break' fired above, i.e. the bot
      # matched every requested dimension.
      print bot['id']
      if not options.bare:
        print '  %s' % json.dumps(dimensions, sort_keys=True)
        if bot.get('task_id'):
          print '  task: %s' % bot['task_id']
  return 0
1060
1061
@subcommand.usage('--json file | task_id...')
def CMDcollect(parser, args):
  """Retrieves results of one or multiple Swarming task by its ID.

  The result can be in multiple part if the execution was sharded. It can
  potentially have retries.
  """
  add_collect_options(parser)
  parser.add_option(
      '-j', '--json',
      help='Load the task ids from .json as saved by trigger --dump-json')
  options, args = parser.parse_args(args)
  # Exactly one of positional task ids or --json must be provided.
  if not args and not options.json:
    parser.error('Must specify at least one task id or --json.')
  if args and options.json:
    parser.error('Only use one of task id or --json.')

  if options.json:
    # Load the task ids dumped by 'trigger --dump-json', ordered by shard
    # index.
    try:
      with open(options.json) as f:
        shards = json.load(f)['tasks'].itervalues()
      args = [
        t['task_id']
        for t in sorted(shards, key=lambda x: x['shard_index'])
      ]
    except (KeyError, IOError, TypeError, ValueError):
      parser.error('Failed to parse %s' % options.json)
  else:
    # Task ids are lowercase hex strings.
    hex_digits = frozenset('0123456789abcdef')
    if any(not hex_digits.issuperset(task_id) for task_id in args):
      parser.error('Task ids are 0-9a-f.')

  try:
    return collect(
        options.swarming,
        None,
        args,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
1105
1106
@subcommand.usage('[resource name]')
def CMDquery(parser, args):
  """Returns raw JSON information via an URL endpoint. Use 'list' to gather the
  list of valid values from the server.

  Examples:
    Printing the list of known URLs:
      swarming.py query -S https://server-url list

    Listing last 50 tasks on a specific bot named 'swarm1'
      swarming.py query -S https://server-url --limit 50 bot/swarm1/tasks
  """
  # Page size used for each individual fetch; larger limits are satisfied by
  # following cursors across multiple requests.
  CHUNK_SIZE = 250

  parser.add_option(
      '-L', '--limit', type='int', default=200,
      help='Limit to enforce on limitless items (like number of tasks); '
           'default=%default')
  parser.add_option(
      '--json', help='Path to JSON output file (otherwise prints to stdout)')
  (options, args) = parser.parse_args(args)
  if len(args) != 1:
    parser.error('Must specify only one resource name.')

  base_url = options.swarming + '/swarming/api/v1/client/' + args[0]
  url = base_url
  if options.limit:
    # Use the right query-string separator depending on whether the resource
    # name already contains one.
    merge_char = '&' if '?' in url else '?'
    url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit))
  data = net.url_read_json(url)
  if data is None:
    print >> sys.stderr, 'Failed to access %s' % options.swarming
    return 1

  # Some items support cursors. Try to get automatically if cursors are needed
  # by looking at the 'cursor' items.
  while (
      data.get('cursor') and
      (not options.limit or len(data['items']) < options.limit)):
    merge_char = '&' if '?' in base_url else '?'
    url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor']))
    if options.limit:
      # Only request what is still needed to reach the limit.
      url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items']))
    new = net.url_read_json(url)
    if new is None:
      print >> sys.stderr, 'Failed to access %s' % options.swarming
      return 1
    data['items'].extend(new['items'])
    data['cursor'] = new['cursor']

  # Trim any overshoot past the requested limit; the cursor is then
  # meaningless so drop it.
  if options.limit and len(data.get('items', [])) > options.limit:
    data['items'] = data['items'][:options.limit]
    data.pop('cursor', None)

  if options.json:
    with open(options.json, 'w') as f:
      json.dump(data, f)
  else:
    # Swallow IOError (e.g. EPIPE when piped into 'head').
    try:
      json.dump(data, sys.stdout, indent=2, sort_keys=True)
      sys.stdout.write('\n')
    except IOError:
      pass
  return 0
1172
1173
@subcommand.usage('(hash|isolated) [-- extra_args]')
def CMDrun(parser, args):
  """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
  add_trigger_options(parser)
  add_collect_options(parser)
  add_sharding_options(parser)
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  # First trigger the shards; any failure here aborts before collection.
  try:
    tasks = trigger_task_shards(
        options.swarming, task_request, options.shards)
  except Failure as exc:
    on_error.report(
        'Failed to trigger %s(%s): %s' %
        (options.task_name, args[0], exc.args[0]))
    return 1
  if not tasks:
    on_error.report('Failed to trigger the task.')
    return 1
  print('Triggered task: %s' % options.task_name)
  shards_by_index = sorted(tasks.itervalues(), key=lambda x: x['shard_index'])
  task_ids = [shard['task_id'] for shard in shards_by_index]
  # Then wait for all the shards and return the aggregated exit code.
  try:
    return collect(
        options.swarming,
        options.task_name,
        task_ids,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
maruel@chromium.org0437a732013-08-27 16:05:52 +00001214
1215
@subcommand.usage('task_id')
def CMDreproduce(parser, args):
  """Runs a task locally that was triggered on the server.

  This runs locally the same commands that have been run on the bot. The data
  downloaded will be in a subdirectory named 'work' of the current working
  directory.
  """
  options, args = parser.parse_args(args)
  if len(args) != 1:
    parser.error('Must specify exactly one task id.')

  # Fetch the original task request definition from the server.
  url = options.swarming + '/swarming/api/v1/client/task/%s/request' % args[0]
  request = net.url_read_json(url)
  if not request:
    print >> sys.stderr, 'Failed to retrieve request data for the task'
    return 1

  if not os.path.isdir('work'):
    os.mkdir('work')

  swarming_host = urlparse.urlparse(options.swarming).netloc
  properties = request['properties']
  # Download and extract each data bundle into ./work. Hosts other than the
  # swarming server itself may need their own authentication.
  for data_url, _ in properties['data']:
    assert data_url.startswith('https://'), data_url
    data_host = urlparse.urlparse(data_url).netloc
    if data_host != swarming_host:
      auth.ensure_logged_in('https://' + data_host)

    content = net.url_read(data_url)
    if content is None:
      print >> sys.stderr, 'Failed to download %s' % data_url
      return 1
    with zipfile.ZipFile(StringIO.StringIO(content)) as zip_file:
      zip_file.extractall('work')

  # Reproduce the task's environment variables on top of the current ones.
  # Values are encoded to utf-8 since os.environ on python2 wants str.
  env = None
  if properties['env']:
    env = os.environ.copy()
    logging.info('env: %r', properties['env'])
    env.update(
        (k.encode('utf-8'), v.encode('utf-8'))
        for k, v in properties['env'].iteritems())

  # Run the commands in order inside ./work; remember the first non-zero exit
  # code but keep running the remaining commands.
  exit_code = 0
  for cmd in properties['commands']:
    try:
      c = subprocess.call(cmd, env=env, cwd='work')
    except OSError as e:
      print >> sys.stderr, 'Failed to run: %s' % ' '.join(cmd)
      print >> sys.stderr, str(e)
      c = 1
    if not exit_code:
      exit_code = c
  return exit_code
Marc-Antoine Ruel13a81272014-10-07 20:16:43 -04001271
1272
@subcommand.usage("(hash|isolated) [-- extra_args|raw command]")
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive.

  If an .isolated file is specified instead of an hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
  add_trigger_options(parser)
  add_sharding_options(parser)
  parser.add_option(
      '--dump-json',
      metavar='FILE',
      help='Dump details about the triggered task(s) to this file as json')
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(
        options.swarming, task_request, options.shards)
    if tasks:
      print('Triggered task: %s' % options.task_name)
      tasks_sorted = sorted(
          tasks.itervalues(), key=lambda x: x['shard_index'])
      if options.dump_json:
        # Persist the triggered task ids so 'collect --json' can pick them up.
        data = {
          'base_task_name': options.task_name,
          'tasks': tasks,
        }
        tools.write_json(options.dump_json, data, True)
        print('To collect results, use:')
        print('  swarming.py collect -S %s --json %s' %
            (options.swarming, options.dump_json))
      else:
        print('To collect results, use:')
        print('  swarming.py collect -S %s %s' %
            (options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
        print('Or visit:')
        for t in tasks_sorted:
          print('  ' + t['view_url'])
    # 0 when at least one task was triggered, 1 otherwise.
    return int(not tasks)
  except Failure:
    on_error.report(None)
    return 1
maruel@chromium.org0437a732013-08-27 16:05:52 +00001320
1321
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001322class OptionParserSwarming(logging_utils.OptionParserWithLogging):
maruel@chromium.org0437a732013-08-27 16:05:52 +00001323 def __init__(self, **kwargs):
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001324 logging_utils.OptionParserWithLogging.__init__(
maruel@chromium.org0437a732013-08-27 16:05:52 +00001325 self, prog='swarming.py', **kwargs)
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001326 self.server_group = optparse.OptionGroup(self, 'Server')
Marc-Antoine Ruel5471e3d2013-11-11 19:10:32 -05001327 self.server_group.add_option(
maruel@chromium.orge9403ab2013-09-20 18:03:49 +00001328 '-S', '--swarming',
Kevin Graney5346c162014-01-24 12:20:01 -05001329 metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
maruel@chromium.orge9403ab2013-09-20 18:03:49 +00001330 help='Swarming server to use')
Marc-Antoine Ruel5471e3d2013-11-11 19:10:32 -05001331 self.add_option_group(self.server_group)
Vadim Shtayurae34e13a2014-02-02 11:23:26 -08001332 auth.add_auth_options(self)
maruel@chromium.org0437a732013-08-27 16:05:52 +00001333
1334 def parse_args(self, *args, **kwargs):
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001335 options, args = logging_utils.OptionParserWithLogging.parse_args(
maruel@chromium.org0437a732013-08-27 16:05:52 +00001336 self, *args, **kwargs)
Marc-Antoine Ruel012067b2014-12-10 15:45:42 -05001337 auth.process_auth_options(self, options)
1338 user = self._process_swarming(options)
1339 if hasattr(options, 'user') and not options.user:
1340 options.user = user
1341 return options, args
1342
1343 def _process_swarming(self, options):
1344 """Processes the --swarming option and aborts if not specified.
1345
1346 Returns the identity as determined by the server.
1347 """
maruel@chromium.org0437a732013-08-27 16:05:52 +00001348 if not options.swarming:
1349 self.error('--swarming is required.')
Marc-Antoine Ruel012067b2014-12-10 15:45:42 -05001350 try:
1351 options.swarming = net.fix_url(options.swarming)
1352 except ValueError as e:
1353 self.error('--swarming %s' % e)
1354 on_error.report_on_exception_exit(options.swarming)
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001355 try:
1356 user = auth.ensure_logged_in(options.swarming)
1357 except ValueError as e:
1358 self.error(str(e))
Marc-Antoine Ruel012067b2014-12-10 15:45:42 -05001359 return user
maruel@chromium.org0437a732013-08-27 16:05:52 +00001360
1361
def main(args):
  """Dispatches |args| to the matching subcommand and returns its exit code."""
  parser = OptionParserSwarming(version=__version__)
  return subcommand.CommandDispatcher(__name__).execute(parser, args)
maruel@chromium.org0437a732013-08-27 16:05:52 +00001365
1366
if __name__ == '__main__':
  # One-time process setup before dispatching to a subcommand.
  # Presumably normalizes stdio/argv encoding (depot_tools helper) — see
  # third_party.depot_tools.fix_encoding.
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  # colorama.init() makes ANSI color escape sequences work cross-platform
  # (notably on Windows consoles).
  colorama.init()
  sys.exit(main(sys.argv[1:]))