blob: 72f03cdbf45e1b9a850695b33e3f58c42b766450 [file] [log] [blame]
maruel@chromium.org0437a732013-08-27 16:05:52 +00001#!/usr/bin/env python
Marc-Antoine Ruel8add1242013-11-05 17:28:27 -05002# Copyright 2013 The Swarming Authors. All rights reserved.
Marc-Antoine Ruele98b1122013-11-05 20:27:57 -05003# Use of this source code is governed under the Apache License, Version 2.0 that
4# can be found in the LICENSE file.
maruel@chromium.org0437a732013-08-27 16:05:52 +00005
6"""Client tool to trigger tasks or retrieve results from a Swarming server."""
7
Marc-Antoine Ruel5e6ccdb2015-04-02 15:55:13 -04008__version__ = '0.6.3'
maruel@chromium.org0437a732013-08-27 16:05:52 +00009
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -050010import collections
Marc-Antoine Ruel5e6ccdb2015-04-02 15:55:13 -040011import datetime
maruel@chromium.org0437a732013-08-27 16:05:52 +000012import json
13import logging
14import os
Vadim Shtayurae3fbd102014-04-29 17:05:21 -070015import re
maruel@chromium.org0437a732013-08-27 16:05:52 +000016import shutil
Marc-Antoine Ruel13a81272014-10-07 20:16:43 -040017import StringIO
maruel@chromium.org0437a732013-08-27 16:05:52 +000018import subprocess
19import sys
Vadim Shtayurab19319e2014-04-27 08:50:06 -070020import threading
maruel@chromium.org0437a732013-08-27 16:05:52 +000021import time
22import urllib
Marc-Antoine Ruel13a81272014-10-07 20:16:43 -040023import urlparse
24import zipfile
maruel@chromium.org0437a732013-08-27 16:05:52 +000025
26from third_party import colorama
27from third_party.depot_tools import fix_encoding
28from third_party.depot_tools import subcommand
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000029
Marc-Antoine Ruel8806e622014-02-12 14:15:53 -050030from utils import file_path
Marc-Antoine Ruel819fb162014-03-12 16:38:26 -040031from third_party.chromium import natsort
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000032from utils import net
Marc-Antoine Ruelcfb60852014-07-02 15:22:00 -040033from utils import on_error
maruel@chromium.org0437a732013-08-27 16:05:52 +000034from utils import threading_utils
vadimsh@chromium.org6b706212013-08-28 15:03:46 +000035from utils import tools
36from utils import zip_package
maruel@chromium.org0437a732013-08-27 16:05:52 +000037
Vadim Shtayurae34e13a2014-02-02 11:23:26 -080038import auth
Marc-Antoine Ruel8bee66d2014-08-28 19:02:07 -040039import isolated_format
maruel@chromium.org7b844a62013-09-17 13:04:59 +000040import isolateserver
maruel@chromium.org0437a732013-08-27 16:05:52 +000041import run_isolated
42
43
# Absolute path of the directory containing this script; used to locate
# sibling tools (isolate.py) and to zip up run_isolated for the bots.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -050045
46
class Failure(Exception):
  """Generic, catch-all failure raised by this module."""
50
51
52### Isolated file handling.
53
54
def isolated_upload_zip_bundle(isolate_server, bundle):
  """Uploads a zip package to Isolate Server and returns raw fetch URL.

  Args:
    isolate_server: URL of an Isolate Server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from.
  """
  # The swarming bot fetches this file with a plain HTTPS GET, so the upload
  # goes to the 'default' namespace where the stored data stays uncompressed
  # (swarming_bot cannot decompress). This namespace is unrelated to the
  # |namespace| passed to run_isolated.py for the isolated task's files.
  logging.info('Zipping up and uploading files...')
  started = time.time()
  item = isolateserver.BufferItem(bundle.zip_into_buffer())
  with isolateserver.get_storage(isolate_server, 'default') as storage:
    pushed = storage.upload_items([item])
    fetch_url = storage.get_fetch_url(item)
  took = time.time() - started
  if item not in pushed:
    logging.info('Zip file already on server, time elapsed: %f', took)
  else:
    logging.info('Upload complete, time elapsed: %f', took)
  return fetch_url
82
83
def isolated_get_data(isolate_server):
  """Returns the 'data' section with all files necessary to bootstrap a task
  execution running an isolated task.

  It's mainly zipping run_isolated.zip over and over again.
  TODO(maruel): Get rid of this with.
  https://code.google.com/p/swarming/issues/detail?id=173
  """
  package = zip_package.ZipPackage(ROOT_DIR)
  # Embed an uncompressed run_isolated.zip so the bot can execute it directly.
  package.add_buffer(
      'run_isolated.zip',
      run_isolated.get_as_zip_package().zip_into_buffer(compress=False))
  url = isolated_upload_zip_bundle(isolate_server, package)
  return [(url, 'swarm_data.zip')]
98
99
def isolated_get_run_commands(
    isolate_server, namespace, isolated_hash, extra_args, verbose):
  """Returns the 'commands' to run an isolated task via run_isolated.zip.

  Returns:
    commands list to be added to the request.
  """
  cmd = ['python', 'run_isolated.zip']
  cmd += ['--isolated', isolated_hash]
  cmd += ['--isolate-server', isolate_server]
  cmd += ['--namespace', namespace]
  if verbose:
    cmd += ['--verbose']
  # Everything after '--' is forwarded by run_isolated.py to the command.
  if extra_args:
    cmd += ['--'] + list(extra_args)
  return cmd
120
121
def isolated_archive(isolate_server, namespace, isolated, algo, verbose):
  """Archives a .isolated and all the dependencies on the Isolate Server.

  Args:
    isolate_server: URL of the Isolate Server to upload to.
    namespace: namespace to store the files in.
    isolated: path to the .isolated file to archive.
    algo: hashing algorithm used to hash the resulting .isolated file.
    verbose: verbosity count; repeated as that many --verbose flags.

  Returns:
    Hash of the uploaded .isolated file, or None on archival failure.
  """
  logging.info(
      'isolated_archive(%s, %s, %s)', isolate_server, namespace, isolated)
  print('Archiving: %s' % isolated)
  cmd = [
    sys.executable,
    os.path.join(ROOT_DIR, 'isolate.py'),
    'archive',
    '--isolate-server', isolate_server,
    '--namespace', namespace,
    '--isolated', isolated,
  ]
  cmd.extend(['--verbose'] * verbose)
  logging.info(' '.join(cmd))
  # Bug fix: |verbose| was previously passed as subprocess.call()'s second
  # positional argument, which is |bufsize|, not a verbosity control.
  if subprocess.call(cmd):
    return None
  return isolated_format.hash_file(isolated, algo)
140
141
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
  """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file
  (True) or a hash (False).
  """
  if not arg.endswith('.isolated'):
    # Not a file: it must already be a valid hash.
    if isolated_format.is_valid_hash(arg, algo):
      return arg, False
    on_error.report('Invalid hash %s' % arg)
    return None, False
  # It's a .isolated file: archive it first.
  file_hash = isolated_archive(isolate_server, namespace, arg, algo, verbose)
  if not file_hash:
    on_error.report('Archival failure %s' % arg)
    return None, True
  return file_hash, True
159
160
def isolated_handle_options(options, args):
  """Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.

  Args:
    options: parsed options; reads isolate_server, namespace, verbose, user
        and dimensions. May be mutated: options.isolated and
        options.task_name can be filled in here.
    args: remaining positional command line arguments.

  Returns:
    tuple(command, data).

  Raises:
    ValueError: on inconsistent arguments or failure to archive/upload.
  """
  isolated_cmd_args = []
  # Tracks whether a .isolated file path (as opposed to a bare hash) was
  # passed, so the default task name can use the file's base name. Bug fix:
  # initialized here so it is always bound; previously it was left unassigned
  # when --isolated was set and no extra args were given, causing an
  # UnboundLocalError below when --task-name was not provided either.
  is_file = False
  if not options.isolated:
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      args = args[:index]
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args[1:]
      args = args[:1]
    if len(args) != 1:
      raise ValueError(
          'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
          'process.')
    # Old code. To be removed eventually.
    options.isolated, is_file = isolated_to_hash(
        options.isolate_server, options.namespace, args[0],
        isolated_format.get_hash_algo(options.namespace), options.verbose)
    if not options.isolated:
      raise ValueError('Invalid argument %s' % args[0])
  elif args:
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      if index != 0:
        raise ValueError('Unexpected arguments.')
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args

  command = isolated_get_run_commands(
      options.isolate_server, options.namespace, options.isolated,
      isolated_cmd_args, options.verbose)

  # If a file name was passed, use its base name of the isolated hash.
  # Otherwise, use user name as an approximation of a task name.
  if not options.task_name:
    if is_file:
      key = os.path.splitext(os.path.basename(args[0]))[0]
    else:
      key = options.user
    options.task_name = u'%s/%s/%s' % (
        key,
        '_'.join(
          '%s=%s' % (k, v)
          for k, v in sorted(options.dimensions.iteritems())),
        options.isolated)

  try:
    data = isolated_get_data(options.isolate_server)
  except (IOError, OSError):
    on_error.report('Failed to upload the zip file')
    raise ValueError('Failed to upload the zip file')

  return command, data
223
224
225### Triggering.
226
227
# All the parameters needed to trigger one Swarming task. Converted to the
# JSON body expected by the server by task_request_to_raw_request().
TaskRequest = collections.namedtuple(
    'TaskRequest',
    [
      'command',
      'data',
      'dimensions',
      'env',
      'expiration',
      'hard_timeout',
      'idempotent',
      'io_timeout',
      'name',
      'priority',
      'tags',
      'user',
      'verbose',
    ])
245
246
def task_request_to_raw_request(task_request):
  """Returns the json dict expected by the Swarming server for new request.

  This is for the v1 client Swarming API.
  """
  properties = {
    'commands': [task_request.command],
    'data': task_request.data,
    'dimensions': task_request.dimensions,
    'env': task_request.env,
    'execution_timeout_secs': task_request.hard_timeout,
    'io_timeout_secs': task_request.io_timeout,
    'idempotent': task_request.idempotent,
  }
  return {
    'name': task_request.name,
    # Link to the parent task when this client itself runs inside a task.
    'parent_task_id': os.environ.get('SWARMING_TASK_ID', ''),
    'priority': task_request.priority,
    'properties': properties,
    'scheduling_expiration_secs': task_request.expiration,
    'tags': task_request.tags,
    'user': task_request.user,
  }
269
270
def swarming_handshake(swarming):
  """Initiates the connection to the Swarming server.

  Returns:
    XSRF token string to use for subsequent requests, None on failure.
  """
  response = net.url_read_json(
      swarming + '/swarming/api/v1/client/handshake',
      headers={'X-XSRF-Token-Request': '1'},
      data={})
  if not response:
    logging.error('Failed to handshake with server')
    return None
  logging.info('Connected to server version: %s', response['server_version'])
  return response['xsrf_token']
283
284
def swarming_trigger(swarming, raw_request, xsrf_token):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info('Triggering: %s', raw_request['name'])
  result = net.url_read_json(
      swarming + '/swarming/api/v1/client/request',
      data=raw_request,
      headers={'X-XSRF-Token': xsrf_token})
  if result:
    return result
  on_error.report('Failed to trigger task %s' % raw_request['name'])
  return None
310
311
def setup_googletest(env, shards, index):
  """Returns a copy of |env| with googletest sharding variables set.

  With a single shard, |env| is returned unmodified.
  """
  if shards <= 1:
    return env
  sharded = env.copy()
  sharded.update({
    'GTEST_SHARD_INDEX': str(index),
    'GTEST_TOTAL_SHARDS': str(shards),
  })
  return sharded
319
320
def trigger_task_shards(swarming, task_request, shards):
  """Triggers one or many subtasks of a sharded task.

  Args:
    swarming: URL of the Swarming server.
    task_request: TaskRequest describing the task to shard and trigger.
    shards: number of shards to trigger; 1 triggers a single plain task.

  Returns:
    Dict with task details, returned to caller as part of --dump-json output.
    None in case of failure.
  """
  # Builds the raw request for shard |index|: with more than one shard, the
  # environment gets GTEST sharding variables and the name gets a suffix.
  def convert(index):
    req = task_request
    if shards > 1:
      req = req._replace(
          env=setup_googletest(req.env, shards, index),
          name='%s:%s:%s' % (req.name, index, shards))
    return task_request_to_raw_request(req)

  requests = [convert(index) for index in xrange(shards)]
  xsrf_token = swarming_handshake(swarming)
  if not xsrf_token:
    return None
  tasks = {}
  priority_warning = False
  for index, request in enumerate(requests):
    task = swarming_trigger(swarming, request, xsrf_token)
    if not task:
      # Stop triggering on the first failure; the cleanup below aborts the
      # shards that did get through.
      break
    logging.info('Request result: %s', task)
    # The server may lower the requested priority; warn only once.
    if (not priority_warning and
        task['request']['priority'] != task_request.priority):
      priority_warning = True
      print >> sys.stderr, (
          'Priority was reset to %s' % task['request']['priority'])
    tasks[request['name']] = {
      'shard_index': index,
      'task_id': task['task_id'],
      'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
    }

  # Some shards weren't triggered. Abort everything.
  if len(tasks) != len(requests):
    if tasks:
      print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
          len(tasks), len(requests))
    for task_dict in tasks.itervalues():
      abort_task(swarming, task_dict['task_id'])
    return None

  return tasks
368
369
370### Collection.
maruel@chromium.org0437a732013-08-27 16:05:52 +0000371
372
# How often to print status updates to stdout in 'collect', in seconds.
STATUS_UPDATE_INTERVAL = 15 * 60.
375
Marc-Antoine Ruel2f6581a2014-10-03 11:09:53 -0400376
class State(object):
  """States in which a task can be.

  WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
  values are part of the API so if they change, the API changed.

  It's in fact an enum. Values should be in decreasing order of importance.
  """
  RUNNING = 0x10
  PENDING = 0x20
  EXPIRED = 0x30
  TIMED_OUT = 0x40
  BOT_DIED = 0x50
  CANCELED = 0x60
  COMPLETED = 0x70

  # Groupings used by callers to classify a task's state.
  STATES = (RUNNING, PENDING, EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
  STATES_RUNNING = (RUNNING, PENDING)
  STATES_NOT_RUNNING = (EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
  STATES_DONE = (TIMED_OUT, COMPLETED)
  STATES_ABANDONED = (EXPIRED, BOT_DIED, CANCELED)

  # Human readable names, keyed by state value.
  _NAMES = {
    RUNNING: 'Running',
    PENDING: 'Pending',
    EXPIRED: 'Expired',
    TIMED_OUT: 'Execution timed out',
    BOT_DIED: 'Bot died',
    CANCELED: 'User canceled',
    COMPLETED: 'Completed',
  }

  @classmethod
  def to_string(cls, state):
    """Returns a user-readable string representing a State."""
    try:
      return cls._NAMES[state]
    except KeyError:
      raise ValueError('Invalid state %s' % state)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000415
416
class TaskOutputCollector(object):
  """Assembles task execution summary (for --task-summary-json output).

  Optionally fetches task outputs from isolate server to local disk (used when
  --task-output-dir is passed).

  This object is shared among multiple threads running 'retrieve_results'
  function, in particular they call 'process_shard_result' method in parallel.
  """

  def __init__(self, task_output_dir, task_name, shard_count):
    """Initializes TaskOutputCollector, ensures |task_output_dir| exists.

    Args:
      task_output_dir: (optional) local directory to put fetched files to.
      task_name: name of the swarming task results belong to.
      shard_count: expected number of task shards.
    """
    self.task_output_dir = task_output_dir
    self.task_name = task_name
    self.shard_count = shard_count

    # Guards _per_shard_results (keyed by shard index) and _storage below.
    self._lock = threading.Lock()
    self._per_shard_results = {}
    self._storage = None

    if self.task_output_dir and not os.path.isdir(self.task_output_dir):
      os.makedirs(self.task_output_dir)

  def process_shard_result(self, shard_index, result):
    """Stores results of a single task shard, fetches output files if necessary.

    Modifies |result| in place: adds an 'isolated_out' key extracted from the
    shard's outputs (or None).

    Called concurrently from multiple threads.
    """
    # Sanity check index is in expected range.
    assert isinstance(shard_index, int)
    if shard_index < 0 or shard_index >= self.shard_count:
      logging.warning(
          'Shard index %d is outside of expected range: [0; %d]',
          shard_index, self.shard_count - 1)
      return

    # At most one output per shard may point to isolated out files.
    assert not 'isolated_out' in result
    result['isolated_out'] = None
    for output in result['outputs']:
      isolated_files_location = extract_output_files_location(output)
      if isolated_files_location:
        if result['isolated_out']:
          raise ValueError('Unexpected two task with output')
        result['isolated_out'] = isolated_files_location

    # Store result dict of that shard, ignore results we've already seen.
    with self._lock:
      if shard_index in self._per_shard_results:
        logging.warning('Ignoring duplicate shard index %d', shard_index)
        return
      self._per_shard_results[shard_index] = result

    # Fetch output files if necessary.
    if self.task_output_dir and result['isolated_out']:
      storage = self._get_storage(
          result['isolated_out']['server'],
          result['isolated_out']['namespace'])
      if storage:
        # Output files are supposed to be small and they are not reused across
        # tasks. So use MemoryCache for them instead of on-disk cache. Make
        # files writable, so that calling script can delete them.
        isolateserver.fetch_isolated(
            result['isolated_out']['hash'],
            storage,
            isolateserver.MemoryCache(file_mode_mask=0700),
            os.path.join(self.task_output_dir, str(shard_index)),
            False)

  def finalize(self):
    """Assembles and returns task summary JSON, shutdowns underlying Storage."""
    with self._lock:
      # Write an array of shard results with None for missing shards.
      summary = {
        'shards': [
          self._per_shard_results.get(i) for i in xrange(self.shard_count)
        ],
      }
      # Write summary.json to task_output_dir as well.
      if self.task_output_dir:
        tools.write_json(
            os.path.join(self.task_output_dir, 'summary.json'),
            summary,
            False)
      if self._storage:
        self._storage.close()
        self._storage = None
      return summary

  def _get_storage(self, isolate_server, namespace):
    """Returns isolateserver.Storage to use to fetch files.

    Lazily creates the Storage on first use; returns None if a later shard
    disagrees on the server or namespace.
    """
    assert self.task_output_dir
    with self._lock:
      if not self._storage:
        self._storage = isolateserver.get_storage(isolate_server, namespace)
      else:
        # Shards must all use exact same isolate server and namespace.
        if self._storage.location != isolate_server:
          logging.error(
              'Task shards are using multiple isolate servers: %s and %s',
              self._storage.location, isolate_server)
          return None
        if self._storage.namespace != namespace:
          logging.error(
              'Task shards are using multiple namespaces: %s and %s',
              self._storage.namespace, namespace)
          return None
      return self._storage
532
533
def extract_output_files_location(task_log):
  """Parses a task log for the location of task output files to fetch.

  TODO(vadimsh,maruel): Use side-channel to get this information.
  See 'run_tha_test' in run_isolated.py for where the data is generated.

  Returns:
    Dict with 'hash', 'namespace', 'server' and 'view_url' keys on success.
    None if information is missing or can not be parsed.
  """
  if not task_log:
    return None
  found = re.search(
      r'\[run_isolated_out_hack\](.*)\[/run_isolated_out_hack\]',
      task_log,
      re.DOTALL)
  if not found:
    return None

  def to_ascii(val):
    # Reject non-string values before encoding.
    if not isinstance(val, basestring):
      raise ValueError()
    return val.encode('ascii')

  try:
    parsed = json.loads(found.group(1))
    if not isinstance(parsed, dict):
      raise ValueError()
    isolated_hash = to_ascii(parsed['hash'])
    namespace = to_ascii(parsed['namespace'])
    isolate_server = to_ascii(parsed['storage'])
    if not file_path.is_url(isolate_server):
      raise ValueError()
    view_url = '%s/browse?%s' % (isolate_server, urllib.urlencode(
        [('namespace', namespace), ('hash', isolated_hash)]))
    return {
      'hash': isolated_hash,
      'namespace': namespace,
      'server': isolate_server,
      'view_url': view_url,
    }
  except (KeyError, ValueError):
    logging.warning(
        'Unexpected value of run_isolated_out_hack: %s', found.group(1))
    return None
579
580
def now():
  """Returns the current time; exists so it can be mocked easily in tests."""
  return time.time()
584
585
def retrieve_results(
    base_url, shard_index, task_id, timeout, should_stop, output_collector):
  """Retrieves results for a single task ID.

  Polls the server until the task reaches a non-running state, the |timeout|
  expires or |should_stop| is set.

  Args:
    base_url: URL of the Swarming server.
    shard_index: index of this shard, forwarded to |output_collector|.
    task_id: ID of the task to poll for.
    timeout: seconds to wait in total before giving up; falsy means no limit.
    should_stop: threading.Event checked to abort waiting early.
    output_collector: optional TaskOutputCollector given the final result.

  Returns:
    <result dict> on success.
    None on failure.
  """
  assert isinstance(timeout, float), timeout
  result_url = '%s/swarming/api/v1/client/task/%s' % (base_url, task_id)
  output_url = '%s/swarming/api/v1/client/task/%s/output/all' % (
      base_url, task_id)
  started = now()
  deadline = started + timeout if timeout else None
  attempt = 0

  while not should_stop.is_set():
    attempt += 1

    # Waiting for too long -> give up.
    current_time = now()
    if deadline and current_time >= deadline:
      logging.error('retrieve_results(%s) timed out on attempt %d',
          base_url, attempt)
      return None

    # Do not spin too fast. Spin faster at the beginning though.
    # Start with 1 sec delay and for each 30 sec of waiting add another second
    # of delay, until hitting 15 sec ceiling.
    if attempt > 1:
      max_delay = min(15, 1 + (current_time - started) / 30.0)
      # Never sleep past the deadline.
      delay = min(max_delay, deadline - current_time) if deadline else max_delay
      if delay > 0:
        logging.debug('Waiting %.1f sec before retrying', delay)
        should_stop.wait(delay)
        if should_stop.is_set():
          return None

    # Disable internal retries in net.url_read_json, since we are doing retries
    # ourselves.
    # TODO(maruel): We'd need to know if it's a 404 and not retry at all.
    result = net.url_read_json(result_url, retry_50x=False)
    if not result:
      continue
    if result['state'] in State.STATES_NOT_RUNNING:
      # The task is done; attach its console output before returning.
      out = net.url_read_json(output_url)
      result['outputs'] = (out or {}).get('outputs', [])
      if not result['outputs']:
        logging.error('No output found for task %s', task_id)
      # Record the result, try to fetch attached output files (if any).
      if output_collector:
        # TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
        output_collector.process_shard_result(shard_index, result)
      return result
maruel@chromium.org0437a732013-08-27 16:05:52 +0000640
641
def yield_results(
    swarm_base_url, task_ids, timeout, max_threads, print_status_updates,
    output_collector):
  """Yields swarming task results from the swarming server as (index, result).

  Duplicate shards are ignored. Shards are yielded in order of completion.
  Timed out shards are NOT yielded at all. Caller can compare number of yielded
  shards with len(task_keys) to verify all shards completed.

  max_threads is optional and is used to limit the number of parallel fetches
  done. Since in general the number of task_keys is in the range <=10, it's not
  worth normally to limit the number threads. Mostly used for testing purposes.

  output_collector is an optional instance of TaskOutputCollector that will be
  used to fetch files produced by a task from isolate server to the local disk.

  Yields:
    (index, result). In particular, 'result' is defined as the
    GetRunnerResults() function in services/swarming/server/test_runner.py.
  """
  # One polling thread per shard, capped at max_threads when given.
  number_threads = (
      min(max_threads, len(task_ids)) if max_threads else len(task_ids))
  should_stop = threading.Event()
  results_channel = threading_utils.TaskChannel()

  with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool:
    try:
      # Adds a task to the thread pool to call 'retrieve_results' and return
      # the results together with shard_index that produced them (as a tuple).
      def enqueue_retrieve_results(shard_index, task_id):
        task_fn = lambda *args: (shard_index, retrieve_results(*args))
        pool.add_task(
            0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index,
            task_id, timeout, should_stop, output_collector)

      # Enqueue 'retrieve_results' calls for each shard key to run in parallel.
      for shard_index, task_id in enumerate(task_ids):
        enqueue_retrieve_results(shard_index, task_id)

      # Wait for all of them to finish.
      # Note: a Python 2 list (range()), since .remove() is used below.
      shards_remaining = range(len(task_ids))
      active_task_count = len(task_ids)
      while active_task_count:
        shard_index, result = None, None
        try:
          shard_index, result = results_channel.pull(
              timeout=STATUS_UPDATE_INTERVAL)
        except threading_utils.TaskChannel.Timeout:
          # No shard completed recently; optionally tell the user which shards
          # are still pending, then keep waiting.
          if print_status_updates:
            print(
                'Waiting for results from the following shards: %s' %
                ', '.join(map(str, shards_remaining)))
            sys.stdout.flush()
          continue
        except Exception:
          logging.exception('Unexpected exception in retrieve_results')

        # A call to 'retrieve_results' finished (successfully or not).
        active_task_count -= 1
        if not result:
          logging.error('Failed to retrieve the results for a swarming key')
          continue

        # Yield back results to the caller.
        assert shard_index in shards_remaining
        shards_remaining.remove(shard_index)
        yield shard_index, result

    finally:
      # Done or aborted with Ctrl+C, kill the remaining threads.
      should_stop.set()
713
714
def decorate_shard_output(swarming, shard_index, metadata):
  """Returns wrapped output for swarming task shard."""
  def parse_ts(value):
    return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')

  pending = 'N/A'
  if metadata.get('started_ts'):
    wait = parse_ts(metadata['started_ts']) - parse_ts(metadata['created_ts'])
    pending = '%.1fs' % wait.total_seconds()

  duration = (
      '%.1fs' % metadata['durations'][0] if metadata.get('durations')
      else 'N/A')
  exit_code = (
      '%d' % metadata['exit_codes'][0] if metadata.get('exit_codes')
      else 'N/A')
  bot_id = metadata.get('bot_id') or 'N/A'

  url = '%s/user/task/%s' % (swarming, metadata['id'])
  raw_header = 'Shard %d %s' % (shard_index, url)
  raw_footer = (
      'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % (
          shard_index, pending, duration, bot_id, exit_code))

  # Frame the shard's output between boxed header and footer lines.
  width = max(len(raw_header), len(raw_footer))
  pad_line = '+-%s-+\n' % ('-' * width)
  header = pad_line + '| %s |\n' % raw_header.ljust(width) + pad_line
  footer = pad_line + '| %s |\n' % raw_footer.ljust(width) + pad_line[:-1]
  body = '\n'.join(o for o in metadata['outputs'] if o).rstrip() + '\n'
  return header + body + footer
maruel@chromium.org0437a732013-08-27 16:05:52 +0000752
753
def collect(
    swarming, task_name, task_ids, timeout, decorate, print_status_updates,
    task_summary_json, task_output_dir):
  """Retrieves results of a Swarming task.

  Arguments:
    swarming: Swarming server URL.
    task_name: name of the task, used by TaskOutputCollector; may be None.
    task_ids: list of task ids to wait for, one per shard.
    timeout: seconds to wait for results, 0 for no timeout.
    decorate: if True, wraps each shard output in a decorated ASCII frame.
    print_status_updates: if True, prints periodic status while waiting.
    task_summary_json: file path to dump the summary JSON to, or None.
    task_output_dir: directory to store isolated outputs in, or None.

  Returns:
    0 on success, otherwise the exit code of a failed shard (or 1 when a
    shard produced no result at all or some shards are missing).
  """
  # Collect summary JSON and output files (if task_output_dir is not None).
  output_collector = TaskOutputCollector(
      task_output_dir, task_name, len(task_ids))

  seen_shards = set()
  exit_code = 0
  total_duration = 0
  try:
    for index, metadata in yield_results(
        swarming, task_ids, timeout, None, print_status_updates,
        output_collector):
      seen_shards.add(index)

      # Default to failure if there was no process that even started.
      shard_exit_code = 1
      if metadata.get('exit_codes'):
        shard_exit_code = metadata['exit_codes'][0]
      if shard_exit_code:
        exit_code = shard_exit_code
      if metadata.get('durations'):
        total_duration += metadata['durations'][0]

      if decorate:
        print(decorate_shard_output(swarming, index, metadata))
        if len(seen_shards) < len(task_ids):
          print('')
      else:
        # Use a dedicated display value instead of reusing |exit_code|: the
        # old code clobbered the aggregated exit code with the last shard's
        # value, and formatted the string 'N/A' with %d, raising TypeError.
        shard_exit_display = 'N/A'
        if metadata.get('exit_codes'):
          shard_exit_display = metadata['exit_codes'][0]
        print('%s: %s %s' %
            (metadata.get('bot_id') or 'N/A', metadata['id'],
             shard_exit_display))
        for output in metadata['outputs']:
          if not output:
            continue
          output = output.rstrip()
          if output:
            print(''.join(' %s\n' % l for l in output.splitlines()))
  finally:
    # Always finalize so partial results are flushed even on timeout or ^C.
    summary = output_collector.finalize()
    if task_summary_json:
      tools.write_json(task_summary_json, summary, False)

  if decorate and total_duration:
    print('Total duration: %.1fs' % total_duration)

  if len(seen_shards) != len(task_ids):
    missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards]
    print >> sys.stderr, ('Results from some shards are missing: %s' %
        ', '.join(map(str, missing_shards)))
    return 1

  return exit_code
maruel@chromium.org0437a732013-08-27 16:05:52 +0000812
813
Marc-Antoine Ruelefdc5282014-12-12 19:31:00 -0500814### Commands.
815
816
def abort_task(_swarming, _manifest):
  """Given a task manifest that was triggered, aborts its execution."""
  # TODO(vadimsh): Not supported by the server yet.
820
821
def add_filter_options(parser):
  """Registers the bot dimension filtering options on |parser|."""
  group = tools.optparse.OptionGroup(parser, 'Filtering slaves')
  group.add_option(
      '-d', '--dimension', default=[], action='append', nargs=2,
      dest='dimensions', metavar='FOO bar',
      help='dimension to filter on')
  # Expose the group so callers (e.g. CMDbots) can append more options to it.
  parser.filter_group = group
  parser.add_option_group(group)
829
Marc-Antoine Ruel819fb162014-03-12 16:38:26 -0400830
def add_sharding_options(parser):
  """Registers the --shards option on |parser|."""
  group = tools.optparse.OptionGroup(parser, 'Sharding options')
  group.add_option(
      '--shards', type='int', default=1,
      help='Number of shards to trigger and collect.')
  parser.sharding_group = group
  parser.add_option_group(group)
837
838
def add_trigger_options(parser):
  """Adds all options to trigger a task on Swarming."""
  # Shared option groups: where to fetch .isolated files from and which bots
  # may run the task.
  isolateserver.add_isolate_server_options(parser)
  add_filter_options(parser)

  parser.task_group = tools.optparse.OptionGroup(parser, 'Task properties')
  parser.task_group.add_option(
      '-s', '--isolated',
      help='Hash of the .isolated to grab from the isolate server')
  parser.task_group.add_option(
      '-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar',
      help='Environment variables to set')
  parser.task_group.add_option(
      '--priority', type='int', default=100,
      help='The lower value, the more important the task is')
  parser.task_group.add_option(
      '-T', '--task-name',
      help='Display name of the task. Defaults to '
           '<base_name>/<dimensions>/<isolated hash>/<timestamp> if an '
           'isolated file is provided, if a hash is provided, it defaults to '
           '<user>/<dimensions>/<isolated hash>/<timestamp>')
  parser.task_group.add_option(
      '--tags', action='append', default=[],
      help='Tags to assign to the task.')
  parser.task_group.add_option(
      '--user', default='',
      help='User associated with the task. Defaults to authenticated user on '
           'the server.')
  parser.task_group.add_option(
      '--idempotent', action='store_true', default=False,
      help='When set, the server will actively try to find a previous task '
           'with the same parameter and return this result instead if possible')
  parser.task_group.add_option(
      '--expiration', type='int', default=6*60*60,
      help='Seconds to allow the task to be pending for a bot to run before '
           'this task request expires.')
  # --deadline is a hidden deprecated alias for --expiration; both write to
  # options.expiration.
  parser.task_group.add_option(
      '--deadline', type='int', dest='expiration',
      help=tools.optparse.SUPPRESS_HELP)
  parser.task_group.add_option(
      '--hard-timeout', type='int', default=60*60,
      help='Seconds to allow the task to complete.')
  parser.task_group.add_option(
      '--io-timeout', type='int', default=20*60,
      help='Seconds to allow the task to be silent.')
  parser.task_group.add_option(
      '--raw-cmd', action='store_true', default=False,
      help='When set, the command after -- is used as-is without run_isolated. '
           'In this case, no .isolated file is expected.')
  parser.add_option_group(parser.task_group)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000889
890
def process_trigger_options(parser, options, args):
  """Processes trigger options and uploads files to isolate server if necessary.
  """
  # Normalize the repeated (key, value) pairs collected by optparse into dicts.
  options.dimensions = dict(options.dimensions)
  options.env = dict(options.env)
  if not options.dimensions:
    parser.error('Please at least specify one --dimension')

  inputs = []
  if options.raw_cmd:
    # The positional arguments ARE the command; nothing gets uploaded.
    if not args:
      parser.error(
          'Arguments with --raw-cmd should be passed after -- as command '
          'delimiter.')
    if options.isolate_server:
      parser.error('Can\'t use both --raw-cmd and --isolate-server.')
    command = args
    if not options.task_name:
      # Derive a default task name from the user and the sorted dimensions.
      dims = '_'.join(
          '%s=%s' % (k, v)
          for k, v in sorted(options.dimensions.iteritems()))
      options.task_name = u'%s/%s' % (options.user, dims)
  else:
    isolateserver.process_isolate_server_options(parser, options, False)
    try:
      command, inputs = isolated_handle_options(options, args)
    except ValueError as e:
      parser.error(str(e))

  return TaskRequest(
      command=command,
      data=inputs,
      dimensions=options.dimensions,
      env=options.env,
      expiration=options.expiration,
      hard_timeout=options.hard_timeout,
      idempotent=options.idempotent,
      io_timeout=options.io_timeout,
      name=options.task_name,
      priority=options.priority,
      tags=options.tags,
      user=options.user,
      verbose=options.verbose)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000936
937
def add_collect_options(parser):
  """Adds all options needed to collect task results."""
  parser.server_group.add_option(
      '-t', '--timeout',
      type='float',
      default=80*60.,
      help='Timeout to wait for result, set to 0 for no timeout; default: '
           '%default s')
  parser.group_logging.add_option(
      '--decorate', action='store_true', help='Decorate output')
  parser.group_logging.add_option(
      '--print-status-updates', action='store_true',
      help='Print periodic status updates')
  parser.task_output_group = tools.optparse.OptionGroup(parser, 'Task output')
  # Fix: help text used to read 'shards statuses as know to server directly'.
  parser.task_output_group.add_option(
      '--task-summary-json',
      metavar='FILE',
      help='Dump a summary of task results to this file as json. It contains '
           'only shards statuses as known to the server directly. Any output '
           'files emitted by the task can be collected by using '
           '--task-output-dir')
  parser.task_output_group.add_option(
      '--task-output-dir',
      metavar='DIR',
      help='Directory to put task results into. When the task finishes, this '
           'directory contains per-shard directory with output files produced '
           'by shards: <task-output-dir>/<zero-based-shard-index>/.')
  parser.add_option_group(parser.task_output_group)
maruel@chromium.org0437a732013-08-27 16:05:52 +0000964
965
@subcommand.usage('bots...')
def CMDbot_delete(parser, args):
  """Forcibly deletes bots from the Swarming server."""
  parser.add_option(
      '-f', '--force', action='store_true',
      help='Do not prompt for confirmation')
  options, args = parser.parse_args(args)
  if not args:
    # Fix: the message used to read 'Please specific bots to delete'.
    parser.error('Please specify bots to delete')

  bots = sorted(args)
  if not options.force:
    print('Delete the following bots?')
    for bot in bots:
      print(' %s' % bot)
    if raw_input('Continue? [y/N] ') not in ('y', 'Y'):
      print('Goodbye.')
      return 1

  result = 0
  for bot in bots:
    url = '%s/swarming/api/v1/client/bot/%s' % (options.swarming, bot)
    # A None reply means the RPC failed; keep going so one bad bot does not
    # prevent the others from being deleted.
    if net.url_read_json(url, method='DELETE') is None:
      print('Deleting %s failed' % bot)
      result = 1
  return result
992
993
def CMDbots(parser, args):
  """Returns information about the bots connected to the Swarming server."""
  add_filter_options(parser)
  parser.filter_group.add_option(
      '--dead-only', action='store_true',
      help='Only print dead bots, useful to reap them and reimage broken bots')
  parser.filter_group.add_option(
      '-k', '--keep-dead', action='store_true',
      help='Do not filter out dead bots')
  parser.filter_group.add_option(
      '-b', '--bare', action='store_true',
      help='Do not print out dimensions')
  options, args = parser.parse_args(args)

  if options.keep_dead and options.dead_only:
    parser.error('Use only one of --keep-dead and --dead-only')

  # Fetch every bot, following the server's cursor-based pagination.
  page_size = 250
  base_url = (
      options.swarming + '/swarming/api/v1/client/bots?limit=%d' % page_size)
  bots = []
  cursor = None
  while True:
    url = base_url
    if cursor:
      url += '&cursor=%s' % urllib.quote(cursor)
    data = net.url_read_json(url)
    if data is None:
      sys.stderr.write('Failed to access %s\n' % options.swarming)
      return 1
    bots.extend(data['items'])
    cursor = data['cursor']
    if not cursor:
      break

  def matches(dimensions):
    """True if the bot exposes every --dimension key/value requested."""
    for key, value in options.dimensions:
      if key not in dimensions:
        return False
      bot_value = dimensions[key]
      # A bot can have multiple values for a key, for example,
      # {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows
      # will be accepted.
      if isinstance(bot_value, list):
        if value not in bot_value:
          return False
      elif value != bot_value:
        return False
    return True

  for bot in natsort.natsorted(bots, key=lambda b: b['id']):
    # Apply liveness filtering first.
    if options.dead_only:
      if not bot['is_dead']:
        continue
    elif not options.keep_dead and bot['is_dead']:
      continue
    dimensions = bot['dimensions']
    if not matches(dimensions):
      continue
    print(bot['id'])
    if not options.bare:
      print(' %s' % json.dumps(dimensions, sort_keys=True))
      if bot.get('task_id'):
        print(' task: %s' % bot['task_id'])
  return 0
1058
1059
@subcommand.usage('--json file | task_id...')
def CMDcollect(parser, args):
  """Retrieves results of one or multiple Swarming task by its ID.

  The result can be in multiple part if the execution was sharded. It can
  potentially have retries.
  """
  add_collect_options(parser)
  parser.add_option(
      '-j', '--json',
      help='Load the task ids from .json as saved by trigger --dump-json')
  (options, args) = parser.parse_args(args)
  # Exactly one source of task ids must be used: positional args or --json.
  if not args and not options.json:
    parser.error('Must specify at least one task id or --json.')
  if args and options.json:
    parser.error('Only use one of task id or --json.')

  if options.json:
    # Recover the per-shard task ids in shard order from the dump file.
    try:
      with open(options.json) as f:
        shards = sorted(
            json.load(f)['tasks'].itervalues(),
            key=lambda x: x['shard_index'])
      args = [shard['task_id'] for shard in shards]
    except (KeyError, IOError, ValueError):
      parser.error('Failed to parse %s' % options.json)
  else:
    # Task ids are lowercase hex strings.
    valid = frozenset('0123456789abcdef')
    if not all(valid.issuperset(task_id) for task_id in args):
      parser.error('Task ids are 0-9a-f.')

  try:
    return collect(
        options.swarming,
        None,
        args,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
1103
1104
@subcommand.usage('[resource name]')
def CMDquery(parser, args):
  """Returns raw JSON information via an URL endpoint. Use 'list' to gather the
  list of valid values from the server.

  Examples:
    Printing the list of known URLs:
      swarming.py query -S https://server-url list

    Listing last 50 tasks on a specific bot named 'swarm1'
      swarming.py query -S https://server-url --limit 50 bot/swarm1/tasks
  """
  # Maximum number of items requested per HTTP call; more items are fetched
  # through cursor-based pagination below.
  CHUNK_SIZE = 250

  parser.add_option(
      '-L', '--limit', type='int', default=200,
      help='Limit to enforce on limitless items (like number of tasks); '
           'default=%default')
  parser.add_option(
      '--json', help='Path to JSON output file (otherwise prints to stdout)')
  (options, args) = parser.parse_args(args)
  if len(args) != 1:
    parser.error('Must specify only one resource name.')

  base_url = options.swarming + '/swarming/api/v1/client/' + args[0]
  url = base_url
  if options.limit:
    # The resource name may already contain query parameters; pick the right
    # separator before appending limit=.
    merge_char = '&' if '?' in url else '?'
    url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit))
  data = net.url_read_json(url)
  if data is None:
    print >> sys.stderr, 'Failed to access %s' % options.swarming
    return 1

  # Some items support cursors. Try to get automatically if cursors are needed
  # by looking at the 'cursor' items.
  while (
      data.get('cursor') and
      (not options.limit or len(data['items']) < options.limit)):
    merge_char = '&' if '?' in base_url else '?'
    url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor']))
    if options.limit:
      # Never request more than what remains to reach the requested limit.
      url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items']))
    new = net.url_read_json(url)
    if new is None:
      print >> sys.stderr, 'Failed to access %s' % options.swarming
      return 1
    data['items'].extend(new['items'])
    data['cursor'] = new['cursor']

  # Trim any overshoot from the last page; the cursor is then meaningless.
  if options.limit and len(data.get('items', [])) > options.limit:
    data['items'] = data['items'][:options.limit]
    data.pop('cursor', None)

  if options.json:
    with open(options.json, 'w') as f:
      json.dump(data, f)
  else:
    # Swallow IOError (e.g. EPIPE) when output is piped to a pager like head.
    try:
      json.dump(data, sys.stdout, indent=2, sort_keys=True)
      sys.stdout.write('\n')
    except IOError:
      pass
  return 0
1170
1171
@subcommand.usage('(hash|isolated) [-- extra_args]')
def CMDrun(parser, args):
  """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
  add_trigger_options(parser)
  add_collect_options(parser)
  add_sharding_options(parser)
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(
        options.swarming, task_request, options.shards)
  except Failure as e:
    # |args| may be empty when the task is specified via --isolated; guard
    # the message formatting so it cannot raise IndexError and mask |e|.
    on_error.report(
        'Failed to trigger %s(%s): %s' %
        (options.task_name, args[0] if args else '', e.args[0]))
    return 1
  if not tasks:
    on_error.report('Failed to trigger the task.')
    return 1
  print('Triggered task: %s' % options.task_name)
  # Collect in shard order.
  task_ids = [
    t['task_id']
    for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index'])
  ]
  try:
    return collect(
        options.swarming,
        options.task_name,
        task_ids,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
maruel@chromium.org0437a732013-08-27 16:05:52 +00001212
1213
@subcommand.usage('task_id')
def CMDreproduce(parser, args):
  """Runs a task locally that was triggered on the server.

  This runs locally the same commands that have been run on the bot. The data
  downloaded will be in a subdirectory named 'work' of the current working
  directory.
  """
  options, args = parser.parse_args(args)
  if len(args) != 1:
    parser.error('Must specify exactly one task id.')

  request_url = (
      options.swarming + '/swarming/api/v1/client/task/%s/request' % args[0])
  request = net.url_read_json(request_url)
  if not request:
    sys.stderr.write('Failed to retrieve request data for the task\n')
    return 1

  if not os.path.isdir('work'):
    os.mkdir('work')

  properties = request['properties']
  swarming_host = urlparse.urlparse(options.swarming).netloc
  for data_url, _ in properties['data']:
    assert data_url.startswith('https://'), data_url
    # Log in against foreign hosts before fetching from them.
    data_host = urlparse.urlparse(data_url).netloc
    if data_host != swarming_host:
      auth.ensure_logged_in('https://' + data_host)

    content = net.url_read(data_url)
    if content is None:
      sys.stderr.write('Failed to download %s\n' % data_url)
      return 1
    with zipfile.ZipFile(StringIO.StringIO(content)) as zip_file:
      zip_file.extractall('work')

  env = None
  if properties['env']:
    logging.info('env: %r', properties['env'])
    env = os.environ.copy()
    env.update(
        (k.encode('utf-8'), v.encode('utf-8'))
        for k, v in properties['env'].iteritems())

  # Run the commands in order, remembering the first non-zero exit code.
  exit_code = 0
  for cmd in properties['commands']:
    try:
      returncode = subprocess.call(cmd, env=env, cwd='work')
    except OSError as e:
      sys.stderr.write('Failed to run: %s\n' % ' '.join(cmd))
      sys.stderr.write('%s\n' % e)
      returncode = 1
    if not exit_code:
      exit_code = returncode
  return exit_code
1269
1270
@subcommand.usage("(hash|isolated) [-- extra_args|raw command]")
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive.

  If an .isolated file is specified instead of an hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
  add_trigger_options(parser)
  add_sharding_options(parser)
  parser.add_option(
      '--dump-json',
      metavar='FILE',
      help='Dump details about the triggered task(s) to this file as json')
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(
        options.swarming, task_request, options.shards)
    if tasks:
      print('Triggered task: %s' % options.task_name)
      # Print collect instructions in shard order.
      tasks_sorted = sorted(
          tasks.itervalues(), key=lambda x: x['shard_index'])
      if options.dump_json:
        data = {
          'base_task_name': options.task_name,
          'tasks': tasks,
        }
        tools.write_json(options.dump_json, data, True)
        print('To collect results, use:')
        print(' swarming.py collect -S %s --json %s' %
            (options.swarming, options.dump_json))
      else:
        print('To collect results, use:')
        print(' swarming.py collect -S %s %s' %
            (options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
        print('Or visit:')
        for t in tasks_sorted:
          print(' ' + t['view_url'])
    # 0 when at least one task was triggered, 1 otherwise.
    return int(not tasks)
  except Failure:
    on_error.report(None)
    return 1
maruel@chromium.org0437a732013-08-27 16:05:52 +00001318
1319
class OptionParserSwarming(tools.OptionParserWithLogging):
  """Option parser shared by all swarming.py subcommands.

  Adds the -S/--swarming server option and the authentication options on top
  of the logging options provided by tools.OptionParserWithLogging.
  """

  def __init__(self, **kwargs):
    tools.OptionParserWithLogging.__init__(
        self, prog='swarming.py', **kwargs)
    self.server_group = tools.optparse.OptionGroup(self, 'Server')
    self.server_group.add_option(
        '-S', '--swarming',
        metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
        help='Swarming server to use')
    self.add_option_group(self.server_group)
    auth.add_auth_options(self)

  def parse_args(self, *args, **kwargs):
    """Parses arguments, then processes auth and --swarming options.

    When the subcommand defines options.user and the user did not set it
    explicitly, fills it with the identity reported by the server.
    """
    options, args = tools.OptionParserWithLogging.parse_args(
        self, *args, **kwargs)
    auth.process_auth_options(self, options)
    user = self._process_swarming(options)
    if hasattr(options, 'user') and not options.user:
      options.user = user
    return options, args

  def _process_swarming(self, options):
    """Processes the --swarming option and aborts if not specified.

    Returns the identity as determined by the server.
    """
    if not options.swarming:
      self.error('--swarming is required.')
    try:
      options.swarming = net.fix_url(options.swarming)
    except ValueError as e:
      self.error('--swarming %s' % e)
    on_error.report_on_exception_exit(options.swarming)
    try:
      user = auth.ensure_logged_in(options.swarming)
    except ValueError as e:
      self.error(str(e))
    return user
maruel@chromium.org0437a732013-08-27 16:05:52 +00001358
1359
def main(args):
  """Dispatches |args| to the matching CMD* subcommand and returns its result."""
  return subcommand.CommandDispatcher(__name__).execute(
      OptionParserSwarming(version=__version__), args)
maruel@chromium.org0437a732013-08-27 16:05:52 +00001363
1364
if __name__ == '__main__':
  # One-time process setup before dispatching to the subcommand.
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  sys.exit(main(sys.argv[1:]))