#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

"""Archives a set of files or directories to an Isolate Server."""

__version__ = '0.9.0'

import collections
import errno
import functools
import logging
import optparse
import os
import re
import signal
import stat
import sys
import tarfile
import threading
import time
import zlib

from utils import tools
tools.force_local_third_party()

# third_party/
import colorama
from depot_tools import fix_encoding
from depot_tools import subcommand
import six
from six.moves import queue as Queue

# pylint: disable=ungrouped-imports
import auth
import isolated_format
import isolate_storage
import local_caching
from utils import file_path
from utils import fs
from utils import logging_utils
from utils import net
from utils import on_error
from utils import subprocess42
from utils import threading_utils


# Version of isolate protocol passed to the server in /handshake request.
ISOLATE_PROTOCOL_VERSION = '1.0'


# Maximum expected delay (in seconds) between successive file fetches or uploads
# in Storage. If it takes longer than that, a deadlock might be happening
# and all stack frames for all threads are dumped to log.
DEADLOCK_TIMEOUT = 5 * 60


# The number of files to check the isolate server per /pre-upload query.
# All files are sorted by likelihood of a change in the file content
# (currently file size is used to estimate this: the larger the file, the more
# likely it has changed). Then the first ITEMS_PER_CONTAINS_QUERIES[0] files
# are taken and sent to '/pre-upload', then the next ITEMS_PER_CONTAINS_QUERIES[1],
# and so on. The numbers here are a trade-off; the more per request, the lower
# the effect of HTTP round trip latency and TCP-level chattiness. On the other
# hand, larger values cause longer lookups, increasing the initial latency to
# start uploading, which is especially an issue for large files. This value is
# optimized for the "few thousands of files to look up with a minimal number of
# large files missing" case.
ITEMS_PER_CONTAINS_QUERIES = (20, 20, 50, 50, 50, 100)


# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
    '7z', 'avi', 'cur', 'gif', 'h264', 'jar', 'jpeg', 'jpg', 'mp4', 'pdf',
    'png', 'wav', 'zip',
]


# The delay (in seconds) to wait between logging statements when retrieving
# the required files. This is intended to let the user (or buildbot) know that
# the program is still running.
DELAY_BETWEEN_UPDATES_IN_SECS = 30


DEFAULT_BLACKLIST = (
    # Temporary vim or python files.
    r'^.+\.(?:pyc|swp)$',
    # .git or .svn directory.
    r'^(?:.+' + re.escape(os.path.sep) + r'|)\.(?:git|svn)$',
)

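# Illustrative note (not in the original file): on POSIX the DEFAULT_BLACKLIST
# patterns above match entries such as 'foo.pyc', 'bar.swp', '.git' and
# 'src/.svn', which are skipped during archival.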

class Error(Exception):
  """Generic runtime error."""
  pass


class Aborted(Error):
  """Operation aborted."""
  pass


class AlreadyExists(Error):
  """File already exists."""


def file_read(path, chunk_size=isolated_format.DISK_FILE_CHUNK, offset=0):
  """Yields file content in chunks of |chunk_size| starting from |offset|."""
  with fs.open(path, 'rb') as f:
    if offset:
      f.seek(offset)
    while True:
      data = f.read(chunk_size)
      if not data:
        break
      yield data


def fileobj_path(fileobj):
  """Return file system path for file like object or None.

  The returned path is guaranteed to exist and can be passed to file system
  operations like copy.
  """
  name = getattr(fileobj, 'name', None)
  if name is None:
    return None

  # If the file like object was created using something like open("test.txt")
  # name will end up being a str (such as a function outside our control, like
  # the standard library). We want all our paths to be unicode objects, so we
  # decode it.
  if not isinstance(name, six.text_type):
    # We incorrectly assume that UTF-8 is used everywhere.
    name = name.decode('utf-8')

  # fs.exists requires an absolute path, otherwise it will fail with an
  # assertion error.
  if not os.path.isabs(name):
    return None

  if fs.exists(name):
    return name
  return None


# TODO(tansell): Replace fileobj_copy with shutil.copyfileobj once proper file
# wrappers have been created.
def fileobj_copy(
    dstfileobj, srcfileobj, size=-1,
    chunk_size=isolated_format.DISK_FILE_CHUNK):
  """Copy data from srcfileobj to dstfileobj.

  Providing size means exactly that amount of data will be copied (if there
  isn't enough data, an IOError exception is thrown). Otherwise all data until
  the EOF marker will be copied.
  """
  if size == -1 and hasattr(srcfileobj, 'tell'):
    if srcfileobj.tell() != 0:
      raise IOError('partial file but not using size')

  written = 0
  while written != size:
    readsize = chunk_size
    if size > 0:
      readsize = min(readsize, size-written)
    data = srcfileobj.read(readsize)
    if not data:
      if size == -1:
        break
      raise IOError('partial file, got %s, wanted %s' % (written, size))
    dstfileobj.write(data)
    written += len(data)


def putfile(srcfileobj, dstpath, file_mode=None, size=-1, use_symlink=False):
  """Put srcfileobj at the given dstpath with given mode.

  The function aims to do this as efficiently as possible while still allowing
  any possible file like object to be given.

  Creating a tree of hardlinks has a few drawbacks:
  - tmpfs cannot be used for the scratch space. The tree has to be on the same
    partition as the cache.
  - involves a write to the inode, which advances ctime, causing a metadata
    writeback (and thus disk seeking).
  - cache ctime cannot be used to detect modifications / corruption.
  - Some file systems (NTFS) have a 64k limit on the number of hardlinks per
    partition. This is why the function automatically falls back to copying the
    file content.
  - /proc/sys/fs/protected_hardlinks causes an additional check to ensure all
    hardlinks share the same owner.
  - Anecdotal reports say ext2 is potentially faulty under a high rate of
    hardlink creation.

  Creating a tree of symlinks has a few drawbacks:
  - Tasks running the equivalent of os.path.realpath() will get the naked path
    and may fail.
  - Windows:
    - Symlinks are reparse points:
      https://msdn.microsoft.com/library/windows/desktop/aa365460.aspx
      https://msdn.microsoft.com/library/windows/desktop/aa363940.aspx
    - Symbolic links are Win32 paths, not NT paths.
      https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
    - Symbolic links are supported on Windows 7 and later only.
    - SeCreateSymbolicLinkPrivilege is needed, which is not present by
      default.
    - SeCreateSymbolicLinkPrivilege is *stripped off* by UAC when a restricted
      RID is present in the token;
      https://msdn.microsoft.com/en-us/library/bb530410.aspx
  """
  srcpath = fileobj_path(srcfileobj)
  if srcpath and size == -1:
    readonly = file_mode is None or (
        file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))

    if readonly:
      # If the file is read only we can link the file
      if use_symlink:
        link_mode = file_path.SYMLINK_WITH_FALLBACK
      else:
        link_mode = file_path.HARDLINK_WITH_FALLBACK
    else:
      # If not read only, we must copy the file
      link_mode = file_path.COPY

    file_path.link_file(dstpath, srcpath, link_mode)
    assert fs.exists(dstpath)
  else:
    # Need to write out the file
    with fs.open(dstpath, 'wb') as dstfileobj:
      fileobj_copy(dstfileobj, srcfileobj, size)

  if sys.platform == 'win32' and file_mode and file_mode & stat.S_IWRITE:
    # On windows, mode other than removing stat.S_IWRITE is ignored. Returns
    # early to skip slow/unnecessary chmod call.
    return

  # file_mode of 0 is actually valid, so need explicit check.
  if file_mode is not None:
    fs.chmod(dstpath, file_mode)

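# Illustrative sketch (not in the original file): putfile() consumes an
# already-open source file object; the paths and mode below are hypothetical.
#
#   with fs.open(u'/cache/deadbeef', 'rb') as src:
#     putfile(src, u'/work/out.bin', file_mode=0o500, use_symlink=False)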

def zip_compress(content_generator, level=7):
  """Reads chunks from |content_generator| and yields zip compressed chunks."""
  compressor = zlib.compressobj(level)
  for chunk in content_generator:
    compressed = compressor.compress(chunk)
    if compressed:
      yield compressed
  tail = compressor.flush(zlib.Z_FINISH)
  if tail:
    yield tail


def zip_decompress(
    content_generator, chunk_size=isolated_format.DISK_FILE_CHUNK):
  """Reads zipped data from |content_generator| and yields decompressed data.

  Decompresses data in small chunks (no larger than |chunk_size|) so that a
  zip bomb doesn't cause zlib to preallocate a huge amount of memory.

  Raises IOError if data is corrupted or incomplete.
  """
  decompressor = zlib.decompressobj()
  compressed_size = 0
  try:
    for chunk in content_generator:
      compressed_size += len(chunk)
      data = decompressor.decompress(chunk, chunk_size)
      if data:
        yield data
      while decompressor.unconsumed_tail:
        data = decompressor.decompress(decompressor.unconsumed_tail, chunk_size)
        if data:
          yield data
    tail = decompressor.flush()
    if tail:
      yield tail
  except zlib.error as e:
    raise IOError(
        'Corrupted zip stream (read %d bytes) - %s' % (compressed_size, e))
  # Ensure all data was read and decompressed.
  if decompressor.unused_data or decompressor.unconsumed_tail:
    raise IOError('Not all data was decompressed')

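# Illustrative sketch (not in the original file): zip_compress() and
# zip_decompress() are symmetric generators, so data can be round-tripped
# through them. The chunk contents below are arbitrary.
#
#   chunks = [b'hello ' * 1000, b'world ' * 1000]
#   compressed = list(zip_compress(iter(chunks)))
#   assert b''.join(zip_decompress(iter(compressed))) == b''.join(chunks)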

def _get_zip_compression_level(filename):
  """Given a filename calculates the ideal zip compression level to use."""
  file_ext = os.path.splitext(filename)[1].lower()
  # TODO(csharp): Profile to find what compression level works best.
  return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7


def create_directories(base_directory, files):
  """Creates the directory structure needed by the given list of files."""
  logging.debug('create_directories(%s, %d)', base_directory, len(files))
  # Creates the tree of directories to create.
  directories = set(os.path.dirname(f) for f in files)
  for item in list(directories):
    while item:
      directories.add(item)
      item = os.path.dirname(item)
  for d in sorted(directories):
    if d:
      abs_d = os.path.join(base_directory, d)
      if not fs.isdir(abs_d):
        fs.mkdir(abs_d)


def _create_symlinks(base_directory, files):
  """Creates any symlinks needed by the given set of files."""
  for filepath, properties in files:
    if 'l' not in properties:
      continue
    if sys.platform == 'win32':
      # TODO(maruel): Create symlink via the win32 api.
      logging.warning('Ignoring symlink %s', filepath)
      continue
    outfile = os.path.join(base_directory, filepath)
    try:
      os.symlink(properties['l'], outfile)  # pylint: disable=E1101
    except OSError as e:
      if e.errno == errno.EEXIST:
        raise AlreadyExists('File %s already exists.' % outfile)
      raise


class _ThreadFile(object):
  """Multithreaded fake file. Used by TarBundle."""
  def __init__(self):
    self._data = threading_utils.TaskChannel()
    self._offset = 0

  def __iter__(self):
    return self._data

  def tell(self):
    return self._offset

  def write(self, b):
    self._data.send_result(b)
    self._offset += len(b)

  def close(self):
    self._data.send_done()


class FileItem(isolate_storage.Item):
  """A file to push to Storage.

  Its digest and size may be provided in advance, if known. Otherwise they will
  be derived from the file content.
  """

  def __init__(self, path, algo, digest=None, size=None, high_priority=False):
    super(FileItem, self).__init__(
        digest,
        size if size is not None else fs.stat(path).st_size,
        high_priority,
        compression_level=_get_zip_compression_level(path))
    self._path = path
    self._algo = algo
    self._meta = None

  @property
  def path(self):
    return self._path

  @property
  def digest(self):
    if not self._digest:
      self._digest = isolated_format.hash_file(self._path, self._algo)
    return self._digest

  @property
  def meta(self):
    if not self._meta:
      # TODO(maruel): Inline.
      self._meta = isolated_format.file_to_metadata(self.path, 0, False)
      # We need to hash right away.
      self._meta['h'] = self.digest
    return self._meta

  def content(self):
    return file_read(self.path)

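# Illustrative sketch (not in the original file): a FileItem wraps a file on
# disk so that Storage.upload_items() can hash and push it lazily. The path
# and the use of hashlib.sha1 below are hypothetical.
#
#   import hashlib
#   item = FileItem(path=u'/tmp/payload.bin', algo=hashlib.sha1)
#   # uploaded = storage.upload_items([item])  # given a Storage instance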
390
Marc-Antoine Ruel440eee62018-12-04 22:37:05 +0000391class TarBundle(isolate_storage.Item):
392 """Tarfile to push to Storage.
393
394 Its digest is the digest of all the files it contains. It is generated on the
395 fly.
396 """
397
398 def __init__(self, root, algo):
399 # 2 trailing 512 bytes headers.
400 super(TarBundle, self).__init__(size=1024)
401 self._items = []
402 self._meta = None
403 self._algo = algo
404 self._root_len = len(root) + 1
405 # Same value as for Go.
406 # https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/tar_archiver.go
407 # https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/upload_tracker.go
408 self._archive_max_size = int(10e6)
409
410 @property
411 def digest(self):
412 if not self._digest:
413 self._prepare()
414 return self._digest
415
416 @property
417 def size(self):
418 if self._size is None:
419 self._prepare()
420 return self._size
421
422 def try_add(self, item):
423 """Try to add this file to the bundle.
424
425 It is extremely naive but this should be just enough for
426 https://crbug.com/825418.
427
428 Future improvements should be in the Go code, and the Swarming bot should be
429 migrated to use the Go code instead.
430 """
431 if not item.size:
432 return False
433 # pylint: disable=unreachable
434 rounded = (item.size + 512) & ~511
435 if rounded + self._size > self._archive_max_size:
436 return False
437 # https://crbug.com/825418
438 return False
439 self._size += rounded
440 self._items.append(item)
441 return True
442
443 def yield_item_path_meta(self):
444 """Returns a tuple(Item, filepath, meta_dict).
445
446 If the bundle contains less than 5 items, the items are yielded.
447 """
448 if len(self._items) < 5:
449 # The tarball is too small, yield individual items, if any.
450 for item in self._items:
451 yield item, item.path[self._root_len:], item.meta
452 else:
453 # This ensures self._meta is set.
454 p = self.digest + '.tar'
455 # Yield itself as a tarball.
456 yield self, p, self._meta
457
458 def content(self):
459 """Generates the tarfile content on the fly."""
460 obj = _ThreadFile()
461 def _tar_thread():
462 try:
463 t = tarfile.open(
464 fileobj=obj, mode='w', format=tarfile.PAX_FORMAT, encoding='utf-8')
465 for item in self._items:
466 logging.info(' tarring %s', item.path)
467 t.add(item.path)
468 t.close()
469 except Exception:
470 logging.exception('Internal failure')
471 finally:
472 obj.close()
473
474 t = threading.Thread(target=_tar_thread)
475 t.start()
476 try:
477 for data in obj:
478 yield data
479 finally:
480 t.join()
481
482 def _prepare(self):
483 h = self._algo()
484 total = 0
485 for chunk in self.content():
486 h.update(chunk)
487 total += len(chunk)
488 # pylint: disable=attribute-defined-outside-init
489 # This is not true, they are defined in Item.__init__().
490 self._digest = h.hexdigest()
491 self._size = total
492 self._meta = {
493 'h': self.digest,
494 's': self.size,
495 't': u'tar',
496 }
497
498
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400499class BufferItem(isolate_storage.Item):
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000500 """A byte buffer to push to Storage."""
501
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +0000502 def __init__(self, buf, algo, high_priority=False):
503 super(BufferItem, self).__init__(
504 digest=algo(buf).hexdigest(),
505 size=len(buf),
506 high_priority=high_priority)
507 self._buffer = buf
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000508
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800509 def content(self):
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +0000510 return [self._buffer]
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000511
512
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000513class Storage(object):
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800514 """Efficiently downloads or uploads large set of files via StorageApi.
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000515
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800516 Implements compression support, parallel 'contains' checks, parallel uploads
517 and more.
518
519 Works only within single namespace (and thus hashing algorithm and compression
520 scheme are fixed).
521
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400522 Spawns multiple internal threads. Thread safe, but not fork safe. Modifies
523 signal handlers table to handle Ctrl+C.
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800524 """
525
Vadim Shtayurae0ab1902014-04-29 10:55:27 -0700526 def __init__(self, storage_api):
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000527 self._storage_api = storage_api
528 self._cpu_thread_pool = None
529 self._net_thread_pool = None
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400530 self._aborted = False
531 self._prev_sig_handlers = {}
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000532
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000533 @property
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000534 def server_ref(self):
535 """Shortcut to get the server_ref from storage_api.
Vadim Shtayurae0ab1902014-04-29 10:55:27 -0700536
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000537 This can be used to get the underlying hash_algo.
Vadim Shtayurae0ab1902014-04-29 10:55:27 -0700538 """
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000539 return self._storage_api.server_ref
Vadim Shtayurae0ab1902014-04-29 10:55:27 -0700540
541 @property
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000542 def cpu_thread_pool(self):
543 """ThreadPool for CPU-bound tasks like zipping."""
544 if self._cpu_thread_pool is None:
Marc-Antoine Ruelbdad1182015-02-06 16:04:35 -0500545 threads = max(threading_utils.num_processors(), 2)
Lei Leife202df2019-06-11 17:33:34 +0000546 max_size = long(2)**32 if sys.version_info.major == 2 else 2**32
547 if sys.maxsize <= max_size:
Marc-Antoine Ruelbdad1182015-02-06 16:04:35 -0500548 # On 32 bits userland, do not try to use more than 16 threads.
549 threads = min(threads, 16)
550 self._cpu_thread_pool = threading_utils.ThreadPool(2, threads, 0, 'zip')
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000551 return self._cpu_thread_pool
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000552
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000553 @property
554 def net_thread_pool(self):
555 """AutoRetryThreadPool for IO-bound tasks, retries IOError."""
556 if self._net_thread_pool is None:
Vadim Shtayura3148e072014-09-02 18:51:52 -0700557 self._net_thread_pool = threading_utils.IOAutoRetryThreadPool()
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000558 return self._net_thread_pool
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000559
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000560 def close(self):
561 """Waits for all pending tasks to finish."""
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400562 logging.info('Waiting for all threads to die...')
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000563 if self._cpu_thread_pool:
564 self._cpu_thread_pool.join()
565 self._cpu_thread_pool.close()
566 self._cpu_thread_pool = None
567 if self._net_thread_pool:
568 self._net_thread_pool.join()
569 self._net_thread_pool.close()
570 self._net_thread_pool = None
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400571 logging.info('Done.')
572
573 def abort(self):
574 """Cancels any pending or future operations."""
575 # This is not strictly theadsafe, but in the worst case the logging message
576 # will be printed twice. Not a big deal. In other places it is assumed that
577 # unprotected reads and writes to _aborted are serializable (it is true
578 # for python) and thus no locking is used.
579 if not self._aborted:
580 logging.warning('Aborting... It can take a while.')
581 self._aborted = True
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000582
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000583 def __enter__(self):
584 """Context manager interface."""
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400585 assert not self._prev_sig_handlers, self._prev_sig_handlers
586 for s in (signal.SIGINT, signal.SIGTERM):
587 self._prev_sig_handlers[s] = signal.signal(s, lambda *_args: self.abort())
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000588 return self
589
590 def __exit__(self, _exc_type, _exc_value, _traceback):
591 """Context manager interface."""
592 self.close()
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400593 while self._prev_sig_handlers:
594 s, h = self._prev_sig_handlers.popitem()
595 signal.signal(s, h)
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000596 return False
597
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000598 def upload_items(self, items):
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000599 """Uploads a generator of Item to the isolate server.
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000600
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800601 It figures out what items are missing from the server and uploads only them.
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000602
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000603 It uses 3 threads internally:
604 - One to create batches based on a timeout
605 - One to dispatch the /contains RPC and field the missing entries
606 - One to field the /push RPC
607
608 The main threads enumerates 'items' and pushes to the first thread. Then it
609 join() all the threads, waiting for them to complete.
610
611 (enumerate items of Item, this can be slow as disk is traversed)
612 |
613 v
614 _create_items_batches_thread Thread #1
615 (generates list(Item), every 3s or 20~100 items)
616 |
617 v
618 _do_lookups_thread Thread #2
619 | |
620 v v
621 (missing) (was on server)
622 |
623 v
624 _handle_missing_thread Thread #3
625 |
626 v
627 (upload Item, append to uploaded)
628
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000629 Arguments:
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400630 items: list of isolate_storage.Item instances that represents data to
631 upload.
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000632
633 Returns:
634 List of items that were uploaded. All other items are already there.
635 """
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000636 incoming = Queue.Queue()
637 batches_to_lookup = Queue.Queue()
638 missing = Queue.Queue()
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000639 uploaded = []
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800640
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000641 def _create_items_batches_thread():
642 """Creates batches for /contains RPC lookup from individual items.
643
644 Input: incoming
645 Output: batches_to_lookup
646 """
647 try:
648 batch_size_index = 0
649 batch_size = ITEMS_PER_CONTAINS_QUERIES[batch_size_index]
650 batch = []
651 while not self._aborted:
652 try:
653 item = incoming.get(True, timeout=3)
654 if item:
655 batch.append(item)
656 except Queue.Empty:
657 item = False
658 if len(batch) == batch_size or (not item and batch):
659 if len(batch) == batch_size:
660 batch_size_index += 1
661 batch_size = ITEMS_PER_CONTAINS_QUERIES[
662 min(batch_size_index, len(ITEMS_PER_CONTAINS_QUERIES)-1)]
663 batches_to_lookup.put(batch)
664 batch = []
665 if item is None:
666 break
667 finally:
668 # Unblock the next pipeline.
669 batches_to_lookup.put(None)
670
671 def _do_lookups_thread():
672 """Enqueues all the /contains RPCs and emits the missing items.
673
674 Input: batches_to_lookup
675 Output: missing, to_upload
676 """
677 try:
678 channel = threading_utils.TaskChannel()
679 def _contains(b):
680 if self._aborted:
681 raise Aborted()
682 return self._storage_api.contains(b)
683
684 pending_contains = 0
685 while not self._aborted:
686 batch = batches_to_lookup.get()
687 if batch is None:
688 break
689 self.net_thread_pool.add_task_with_channel(
690 channel, threading_utils.PRIORITY_HIGH, _contains, batch)
691 pending_contains += 1
692 while pending_contains and not self._aborted:
693 try:
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +0000694 v = channel.next(timeout=0)
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000695 except threading_utils.TaskChannel.Timeout:
696 break
697 pending_contains -= 1
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +0000698 for missing_item, push_state in v.items():
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000699 missing.put((missing_item, push_state))
700 while pending_contains and not self._aborted:
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +0000701 for missing_item, push_state in channel.next().items():
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000702 missing.put((missing_item, push_state))
703 pending_contains -= 1
704 finally:
705 # Unblock the next pipeline.
706 missing.put((None, None))
707
708 def _handle_missing_thread():
709 """Sends the missing items to the uploader.
710
711 Input: missing
712 Output: uploaded
713 """
Vadim Shtayura3148e072014-09-02 18:51:52 -0700714 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000715 channel = threading_utils.TaskChannel()
716 pending_upload = 0
717 while not self._aborted:
718 try:
719 missing_item, push_state = missing.get(True, timeout=5)
720 if missing_item is None:
721 break
722 self._async_push(channel, missing_item, push_state)
723 pending_upload += 1
724 except Queue.Empty:
725 pass
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000726 detector.ping()
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000727 while not self._aborted and pending_upload:
728 try:
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +0000729 item = channel.next(timeout=0)
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000730 except threading_utils.TaskChannel.Timeout:
731 break
732 uploaded.append(item)
733 pending_upload -= 1
734 logging.debug(
735 'Uploaded %d; %d pending: %s (%d)',
736 len(uploaded), pending_upload, item.digest, item.size)
737 while not self._aborted and pending_upload:
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +0000738 item = channel.next()
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000739 uploaded.append(item)
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000740 pending_upload -= 1
vadimsh@chromium.orgbcb966b2013-10-01 18:14:18 +0000741 logging.debug(
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000742 'Uploaded %d; %d pending: %s (%d)',
743 len(uploaded), pending_upload, item.digest, item.size)
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000744
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000745 threads = [
746 threading.Thread(target=_create_items_batches_thread),
747 threading.Thread(target=_do_lookups_thread),
748 threading.Thread(target=_handle_missing_thread),
749 ]
750 for t in threads:
751 t.start()
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000752
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000753 try:
754 # For each digest keep only first isolate_storage.Item that matches it.
755 # All other items are just indistinguishable copies from the point of view
756 # of isolate server (it doesn't care about paths at all, only content and
757 # digests).
758 seen = {}
759 try:
760 # TODO(maruel): Reorder the items as a priority queue, with larger items
761 # being processed first. This is, before hashing the data.
762 # This must be done in the primary thread since items can be a
763 # generator.
764 for item in items:
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000765 if seen.setdefault(item.digest, item) is item:
766 incoming.put(item)
767 finally:
768 incoming.put(None)
769 finally:
770 for t in threads:
771 t.join()
772
773 logging.info('All %s files are uploaded', len(uploaded))
Marc-Antoine Ruel73c0ae72018-11-30 14:05:45 +0000774 if seen:
775 _print_upload_stats(seen.values(), uploaded)
vadimsh@chromium.orgf24e5c32013-10-11 21:16:21 +0000776 return uploaded
777
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000778 def _async_push(self, channel, item, push_state):
vadimsh@chromium.orgbcb966b2013-10-01 18:14:18 +0000779 """Starts asynchronous push to the server in a parallel thread.
780
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000781 Can be used only after |item| was checked for presence on a server with a
782 /contains RPC.
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800783
vadimsh@chromium.orgbcb966b2013-10-01 18:14:18 +0000784 Arguments:
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000785 channel: TaskChannel that receives back |item| when upload ends.
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400786 item: item to upload as instance of isolate_storage.Item class.
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000787 push_state: push state returned by storage_api.contains(). It contains
788 storage specific information describing how to upload the item (for
789 example in case of cloud storage, it is signed upload URLs).
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800790
791 Returns:
792 None, but |channel| later receives back |item| when upload ends.
vadimsh@chromium.orgbcb966b2013-10-01 18:14:18 +0000793 """
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800794 # Thread pool task priority.
Marc-Antoine Ruel52436aa2014-08-28 21:57:57 -0400795 priority = (
Vadim Shtayura3148e072014-09-02 18:51:52 -0700796 threading_utils.PRIORITY_HIGH if item.high_priority
797 else threading_utils.PRIORITY_MED)
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800798
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000799 def _push(content):
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400800 """Pushes an isolate_storage.Item and returns it to |channel|."""
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400801 if self._aborted:
802 raise Aborted()
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800803 self._storage_api.push(item, push_state, content)
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000804 return item
805
Wei Huang1a38fbe2017-11-28 22:55:22 -0500806 # If zipping is not required, just start a push task. Don't pass 'content'
807 # so that it can create a new generator when it retries on failures.
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000808 if not self.server_ref.is_with_compression:
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000809 self.net_thread_pool.add_task_with_channel(channel, priority, _push, None)
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000810 return
811
812 # If zipping is enabled, zip in a separate thread.
813 def zip_and_push():
814 # TODO(vadimsh): Implement streaming uploads. Before it's done, assemble
815 # content right here. It will block until all file is zipped.
816 try:
Vadim Shtayuraf9e401b2014-10-15 18:19:37 +0400817 if self._aborted:
818 raise Aborted()
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800819 stream = zip_compress(item.content(), item.compression_level)
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000820 data = ''.join(stream)
821 except Exception as exc:
822 logging.error('Failed to zip \'%s\': %s', item, exc)
Vadim Shtayura0ffc4092013-11-20 17:49:52 -0800823 channel.send_exception()
vadimsh@chromium.org7cdf1c02013-09-25 00:24:16 +0000824 return
Wei Huang1a38fbe2017-11-28 22:55:22 -0500825 # Pass '[data]' explicitly because the compressed data is not same as the
826 # one provided by 'item'. Since '[data]' is a list, it can safely be
827 # reused during retries.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000828 self.net_thread_pool.add_task_with_channel(
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000829 channel, priority, _push, [data])
vadimsh@chromium.orgbcb966b2013-10-01 18:14:18 +0000830 self.cpu_thread_pool.add_task(priority, zip_and_push)
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000831
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800832 def push(self, item, push_state):
833 """Synchronously pushes a single item to the server.
834
835 If you need to push many items at once, consider using 'upload_items' or
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000836 '_async_push' with instance of TaskChannel.
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800837
838 Arguments:
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400839 item: item to upload as instance of isolate_storage.Item class.
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000840 push_state: push state returned by storage_api.contains(). It contains
841 storage specific information describing how to upload the item (for
842 example in case of cloud storage, it is signed upload URLs).
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800843
844 Returns:
845 Pushed item (same object as |item|).
846 """
847 channel = threading_utils.TaskChannel()
Vadim Shtayura3148e072014-09-02 18:51:52 -0700848 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT):
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +0000849 self._async_push(channel, item, push_state)
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +0000850 pushed = channel.next()
Vadim Shtayurabcff74f2014-02-27 16:19:34 -0800851 assert pushed is item
852 return item
853
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000854 def async_fetch(self, channel, priority, digest, size, sink):
855 """Starts asynchronous fetch from the server in a parallel thread.
856
857 Arguments:
858 channel: TaskChannel that receives back |digest| when download ends.
859 priority: thread pool task priority for the fetch.
860 digest: hex digest of an item to download.
861 size: expected size of the item (after decompression).
862 sink: function that will be called as sink(generator).
863 """
864 def fetch():
865 try:
866 # Prepare reading pipeline.
Adrian Ludwinb4ebc092017-09-13 07:46:24 -0400867 stream = self._storage_api.fetch(digest, size, 0)
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000868 if self.server_ref.is_with_compression:
Marc-Antoine Ruel8bee66d2014-08-28 19:02:07 -0400869 stream = zip_decompress(stream, isolated_format.DISK_FILE_CHUNK)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000870 # Run |stream| through verifier that will assert its size.
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +0000871 verifier = FetchStreamVerifier(
872 stream, self.server_ref.hash_algo, digest, size)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000873 # Verified stream goes to |sink|.
874 sink(verifier.run())
875 except Exception as err:
Vadim Shtayura0ffc4092013-11-20 17:49:52 -0800876 logging.error('Failed to fetch %s: %s', digest, err)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000877 raise
878 return digest
879
880 # Don't bother with zip_thread_pool for decompression. Decompression is
881 # really fast and most probably IO bound anyway.
882 self.net_thread_pool.add_task_with_channel(channel, priority, fetch)
883
vadimsh@chromium.org35122be2013-09-19 02:48:00 +0000884
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000885class FetchQueue(object):
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400886 """Fetches items from Storage and places them into ContentAddressedCache.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000887
888 It manages multiple concurrent fetch operations. Acts as a bridge between
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400889 Storage and ContentAddressedCache so that Storage and ContentAddressedCache
890 don't depend on each other at all.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000891 """
892
893 def __init__(self, storage, cache):
894 self.storage = storage
895 self.cache = cache
896 self._channel = threading_utils.TaskChannel()
897 self._pending = set()
898 self._accessed = set()
Marc-Antoine Ruel5d7606b2018-06-15 19:06:12 +0000899 self._fetched = set(cache)
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400900 # Pending digests that the caller waits for, see wait_on()/wait().
901 self._waiting_on = set()
902 # Already fetched digests the caller waits for which are not yet returned by
903 # wait().
904 self._waiting_on_ready = set()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000905
Marc-Antoine Ruel52436aa2014-08-28 21:57:57 -0400906 def add(
Vadim Shtayura3148e072014-09-02 18:51:52 -0700907 self,
908 digest,
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -0400909 size=local_caching.UNKNOWN_FILE_SIZE,
Vadim Shtayura3148e072014-09-02 18:51:52 -0700910 priority=threading_utils.PRIORITY_MED):
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000911 """Starts asynchronous fetch of item |digest|."""
912 # Fetching it now?
913 if digest in self._pending:
914 return
915
916 # Mark this file as in use, verify_all_cached will later ensure it is still
917 # in cache.
918 self._accessed.add(digest)
919
920 # Already fetched? Notify cache to update item's LRU position.
921 if digest in self._fetched:
922 # 'touch' returns True if item is in cache and not corrupted.
923 if self.cache.touch(digest, size):
924 return
Marc-Antoine Ruel5d7606b2018-06-15 19:06:12 +0000925 logging.error('%s is corrupted', digest)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000926 self._fetched.remove(digest)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000927
928 # TODO(maruel): It should look at the free disk space, the current cache
929 # size and the size of the new item on every new item:
930 # - Trim the cache as more entries are listed when free disk space is low,
931 # otherwise if the amount of data downloaded during the run > free disk
932 # space, it'll crash.
933 # - Make sure there's enough free disk space to fit all dependencies of
934 # this run! If not, abort early.
935
936 # Start fetching.
937 self._pending.add(digest)
938 self.storage.async_fetch(
939 self._channel, priority, digest, size,
940 functools.partial(self.cache.write, digest))
941
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400942 def wait_on(self, digest):
943 """Updates digests to be waited on by 'wait'."""
944 # Calculate once the already fetched items. These will be retrieved first.
945 if digest in self._fetched:
946 self._waiting_on_ready.add(digest)
947 else:
948 self._waiting_on.add(digest)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000949
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400950 def wait(self):
951 """Waits until any of waited-on items is retrieved.
952
953 Once this happens, it is remove from the waited-on set and returned.
954
955 This function is called in two waves. The first wave it is done for HIGH
956 priority items, the isolated files themselves. The second wave it is called
957 for all the files.
958
959 If the waited-on set is empty, raises RuntimeError.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000960 """
961 # Flush any already fetched items.
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400962 if self._waiting_on_ready:
963 return self._waiting_on_ready.pop()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000964
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400965 assert self._waiting_on, 'Needs items to wait on'
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000966
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400967 # Wait for one waited-on item to be fetched.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000968 while self._pending:
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +0000969 digest = self._channel.next()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000970 self._pending.remove(digest)
971 self._fetched.add(digest)
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400972 if digest in self._waiting_on:
973 self._waiting_on.remove(digest)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000974 return digest
975
976 # Should never reach this point due to assert above.
977 raise RuntimeError('Impossible state')
978
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -0400979 @property
980 def wait_queue_empty(self):
981 """Returns True if there is no digest left for wait() to return."""
982 return not self._waiting_on and not self._waiting_on_ready
983
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000984 def inject_local_file(self, path, algo):
985 """Adds local file to the cache as if it was fetched from storage."""
maruel12e30012015-10-09 11:55:35 -0700986 with fs.open(path, 'rb') as f:
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +0000987 data = f.read()
988 digest = algo(data).hexdigest()
989 self.cache.write(digest, [data])
990 self._fetched.add(digest)
991 return digest
992
993 @property
994 def pending_count(self):
995 """Returns number of items to be fetched."""
996 return len(self._pending)
997
998 def verify_all_cached(self):
999 """True if all accessed items are in cache."""
Marc-Antoine Ruel5d7606b2018-06-15 19:06:12 +00001000 # Not thread safe, but called after all work is done.
1001 return self._accessed.issubset(self.cache)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001002
1003
1004class FetchStreamVerifier(object):
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001005 """Verifies that fetched file is valid before passing it to the
1006 ContentAddressedCache.
1007 """
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001008
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001009 def __init__(self, stream, hasher, expected_digest, expected_size):
1010 """Initializes the verifier.
1011
1012 Arguments:
1013 * stream: an iterable yielding chunks of content
1014 * hasher: an object from hashlib that supports update() and hexdigest()
1015 (eg, hashlib.sha1).
1016 * expected_digest: if the entire stream is piped through hasher and then
1017 summarized via hexdigest(), this should be the result. That is, it
1018 should be a hex string like 'abc123'.
1019 * expected_size: either the expected size of the stream, or
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001020 local_caching.UNKNOWN_FILE_SIZE.
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001021 """
Marc-Antoine Rueldf4976d2015-04-15 19:56:21 -04001022 assert stream is not None
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001023 self.stream = stream
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001024 self.expected_digest = expected_digest
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001025 self.expected_size = expected_size
1026 self.current_size = 0
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001027 self.rolling_hash = hasher()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001028
1029 def run(self):
1030 """Generator that yields same items as |stream|.
1031
1032 Verifies |stream| is complete before yielding a last chunk to consumer.
1033
1034 Also wraps IOError produced by consumer into MappingError exceptions since
1035 otherwise Storage will retry fetch on unrelated local cache errors.
1036 """
1037 # Read one chunk ahead, keep it in |stored|.
1038 # That way a complete stream can be verified before pushing last chunk
1039 # to consumer.
1040 stored = None
1041 for chunk in self.stream:
1042 assert chunk is not None
1043 if stored is not None:
1044 self._inspect_chunk(stored, is_last=False)
1045 try:
1046 yield stored
1047 except IOError as exc:
Marc-Antoine Ruel52436aa2014-08-28 21:57:57 -04001048 raise isolated_format.MappingError(
1049 'Failed to store an item in cache: %s' % exc)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001050 stored = chunk
1051 if stored is not None:
1052 self._inspect_chunk(stored, is_last=True)
1053 try:
1054 yield stored
1055 except IOError as exc:
Marc-Antoine Ruel52436aa2014-08-28 21:57:57 -04001056 raise isolated_format.MappingError(
1057 'Failed to store an item in cache: %s' % exc)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001058
1059 def _inspect_chunk(self, chunk, is_last):
1060 """Called for each fetched chunk before passing it to consumer."""
1061 self.current_size += len(chunk)
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001062 self.rolling_hash.update(chunk)
1063 if not is_last:
1064 return
1065
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001066 if ((self.expected_size != local_caching.UNKNOWN_FILE_SIZE) and
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001067 (self.expected_size != self.current_size)):
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001068 msg = 'Incorrect file size: want %d, got %d' % (
1069 self.expected_size, self.current_size)
Adrian Ludwin6d2a8342017-08-15 19:56:54 -04001070 raise IOError(msg)
1071
1072 actual_digest = self.rolling_hash.hexdigest()
1073 if self.expected_digest != actual_digest:
1074 msg = 'Incorrect digest: want %s, got %s' % (
1075 self.expected_digest, actual_digest)
Adrian Ludwin21920d52017-08-22 09:34:19 -04001076 raise IOError(msg)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001077
1078
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001079class IsolatedBundle(object):
1080 """Fetched and parsed .isolated file with all dependencies."""
1081
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001082 def __init__(self, filter_cb):
1083 """
1084 filter_cb: callback function to filter downloaded content.
1085 When filter_cb is not None, Isolated file is downloaded iff
1086 filter_cb(filepath) returns True.
1087 """
1088
Vadim Shtayura3148e072014-09-02 18:51:52 -07001089 self.command = []
1090 self.files = {}
1091 self.read_only = None
1092 self.relative_cwd = None
1093 # The main .isolated file, a IsolatedFile instance.
1094 self.root = None
1095
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001096 self._filter_cb = filter_cb
1097
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001098 def fetch(self, fetch_queue, root_isolated_hash, algo):
1099 """Fetches the .isolated and all the included .isolated.
Vadim Shtayura3148e072014-09-02 18:51:52 -07001100
1101 It enables support for "included" .isolated files. They are processed in
1102 strict order but fetched asynchronously from the cache. This is important so
1103 that a file in an included .isolated file that is overridden by an embedding
1104 .isolated file is not fetched needlessly. The includes are fetched in one
1105 pass and the files are fetched as soon as all the ones on the left-side
1106 of the tree were fetched.
1107
1108 The prioritization is very important here for nested .isolated files.
1109 'includes' have the highest priority and the algorithm is optimized for both
1110 deep and wide trees. A deep one is a long link of .isolated files referenced
1111 one at a time by one item in 'includes'. A wide one has a large number of
1112 'includes' in a single .isolated file. 'left' is defined as an included
1113 .isolated file earlier in the 'includes' list. So the order of the elements
1114 in 'includes' is important.
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001115
1116 As a side effect this method starts asynchronous fetch of all data files
1117 by adding them to |fetch_queue|. It doesn't wait for data files to finish
1118 fetching though.
Vadim Shtayura3148e072014-09-02 18:51:52 -07001119 """
1120 self.root = isolated_format.IsolatedFile(root_isolated_hash, algo)
1121
1122 # Isolated files being retrieved now: hash -> IsolatedFile instance.
1123 pending = {}
1124 # Set of hashes of already retrieved items to refuse recursive includes.
1125 seen = set()
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001126 # Set of IsolatedFile's whose data files have already being fetched.
1127 processed = set()
Vadim Shtayura3148e072014-09-02 18:51:52 -07001128
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001129 def retrieve_async(isolated_file):
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -04001130 """Retrieves an isolated file included by the root bundle."""
Vadim Shtayura3148e072014-09-02 18:51:52 -07001131 h = isolated_file.obj_hash
1132 if h in seen:
1133 raise isolated_format.IsolatedError(
1134 'IsolatedFile %s is retrieved recursively' % h)
1135 assert h not in pending
1136 seen.add(h)
1137 pending[h] = isolated_file
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -04001138 # This isolated item is being added dynamically, notify FetchQueue.
1139 fetch_queue.wait_on(h)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001140 fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH)
1141
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001142 # Start fetching root *.isolated file (single file, not the whole bundle).
1143 retrieve_async(self.root)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001144
1145 while pending:
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001146 # Wait until some *.isolated file is fetched, parse it.
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -04001147 item_hash = fetch_queue.wait()
Vadim Shtayura3148e072014-09-02 18:51:52 -07001148 item = pending.pop(item_hash)
tansell9e04a8d2016-07-28 09:31:59 -07001149 with fetch_queue.cache.getfileobj(item_hash) as f:
1150 item.load(f.read())
Vadim Shtayura3148e072014-09-02 18:51:52 -07001151
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001152 # Start fetching included *.isolated files.
Vadim Shtayura3148e072014-09-02 18:51:52 -07001153 for new_child in item.children:
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001154 retrieve_async(new_child)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001155
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001156 # Always fetch *.isolated files in traversal order, waiting if necessary
1157 # until next to-be-processed node loads. "Waiting" is done by yielding
1158 # back to the outer loop, that waits until some *.isolated is loaded.
1159 for node in isolated_format.walk_includes(self.root):
1160 if node not in processed:
1161 # Not visited, and not yet loaded -> wait for it to load.
1162 if not node.is_loaded:
1163 break
1164 # Not visited and loaded -> process it and continue the traversal.
1165 self._start_fetching_files(node, fetch_queue)
1166 processed.add(node)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001167
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001168 # All *.isolated files should be processed by now and only them.
1169 all_isolateds = set(isolated_format.walk_includes(self.root))
1170 assert all_isolateds == processed, (all_isolateds, processed)
Marc-Antoine Ruel2d631542018-04-19 20:28:09 -04001171 assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
Vadim Shtayura3148e072014-09-02 18:51:52 -07001172
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001173 # Extract 'command' and other bundle properties.
1174 for node in isolated_format.walk_includes(self.root):
1175 self._update_self(node)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001176 self.relative_cwd = self.relative_cwd or ''
1177
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001178 def _start_fetching_files(self, isolated, fetch_queue):
1179 """Starts fetching files from |isolated| that are not yet being fetched.
Vadim Shtayura3148e072014-09-02 18:51:52 -07001180
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001181 Modifies self.files.
1182 """
maruel10bea7b2016-12-07 05:03:49 -08001183 files = isolated.data.get('files', {})
1184 logging.debug('fetch_files(%s, %d)', isolated.obj_hash, len(files))
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +00001185 for filepath, properties in files.items():
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001186 if self._filter_cb and not self._filter_cb(filepath):
1187 continue
1188
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001189 # Root isolated has priority on the files being mapped. In particular,
1190 # overridden files must not be fetched.
1191 if filepath not in self.files:
1192 self.files[filepath] = properties
tansell9e04a8d2016-07-28 09:31:59 -07001193
1194 # Make sure if the isolated is read only, the mode doesn't have write
1195 # bits.
1196 if 'm' in properties and self.read_only:
1197 properties['m'] &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
1198
1199 # Preemptively request hashed files.
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001200 if 'h' in properties:
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001201 fetch_queue.add(
1202 properties['h'], properties['s'], threading_utils.PRIORITY_MED)
1203
1204 def _update_self(self, node):
1205 """Extracts bundle global parameters from loaded *.isolated file.
1206
1207 Will be called with each loaded *.isolated file in order of traversal of
1208 the isolated include graph (see isolated_format.walk_includes).
1209 """
Vadim Shtayura3148e072014-09-02 18:51:52 -07001210 # Grabs properties.
1211 if not self.command and node.data.get('command'):
1212 # Ensure paths are correctly separated on Windows.
1213 self.command = node.data['command']
1214 if self.command:
1215 self.command[0] = self.command[0].replace('/', os.path.sep)
Vadim Shtayura3148e072014-09-02 18:51:52 -07001216 if self.read_only is None and node.data.get('read_only') is not None:
1217 self.read_only = node.data['read_only']
1218 if (self.relative_cwd is None and
1219 node.data.get('relative_cwd') is not None):
1220 self.relative_cwd = node.data['relative_cwd']
1221
1222
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001223def get_storage(server_ref):
Vadim Shtayurabcff74f2014-02-27 16:19:34 -08001224 """Returns a Storage instance that can upload and download from |server_ref|.
1225
1226 Arguments:
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001227 server_ref: isolate_storage.ServerRef instance.
Vadim Shtayurabcff74f2014-02-27 16:19:34 -08001228
1229 Returns:
1230 Instance of Storage.
1231 """
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001232 assert isinstance(server_ref, isolate_storage.ServerRef), repr(server_ref)
1233 return Storage(isolate_storage.get_storage_api(server_ref))
maruel@chromium.orgdedbf492013-09-12 20:42:11 +00001234
maruel@chromium.orgdedbf492013-09-12 20:42:11 +00001235
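# A minimal usage sketch for get_storage(); the server URL and namespace
# below are illustrative placeholders, not values defined in this file:
#
#   server_ref = isolate_storage.ServerRef(
#       'https://isolate.example.com', 'default-gzip')
#   with get_storage(server_ref) as storage:
#     ...  # e.g. storage.upload_items(...) or storage.async_fetch(...)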
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001236def _map_file(dst, digest, props, cache, read_only, use_symlinks):
1237 """Put downloaded file to destination path. This function is used for multi
1238 threaded file putting.
1239 """
Takuto Ikuta523c6472019-09-18 02:53:34 +00001240 with tools.Profiler("_map_file for %s" % dst):
1241 with cache.getfileobj(digest) as srcfileobj:
1242 filetype = props.get('t', 'basic')
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001243
Takuto Ikuta523c6472019-09-18 02:53:34 +00001244 if filetype == 'basic':
1245 # Ignore all bits apart from the user.
1246 file_mode = (props.get('m') or 0o500) & 0o700
1247 if read_only:
1248 # Enforce read-only if the root bundle does.
1249 file_mode &= 0o500
1250 putfile(srcfileobj, dst, file_mode, use_symlink=use_symlinks)
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001251
Takuto Ikuta523c6472019-09-18 02:53:34 +00001252 elif filetype == 'tar':
1253 basedir = os.path.dirname(dst)
1254 with tarfile.TarFile(fileobj=srcfileobj, encoding='utf-8') as t:
1255 ensured_dirs = set()
1256 for ti in t:
1257 if not ti.isfile():
1258 logging.warning('Path(%r) is not a regular file (%s), skipped', ti.name,
1259 ti.type)
1260 continue
1261 # Handle files created on Windows fetched on POSIX and the
1262 # reverse.
1263 other_sep = '/' if os.path.sep == '\\' else '\\'
1264 name = ti.name.replace(other_sep, os.path.sep)
1265 fp = os.path.normpath(os.path.join(basedir, name))
1266 if not fp.startswith(basedir):
1267 logging.error('Path(%r) is outside root directory', fp)
1268 ifd = t.extractfile(ti)
1269 fp_dir = os.path.dirname(fp)
1270 if fp_dir not in ensured_dirs:
1271 file_path.ensure_tree(fp_dir)
1272 ensured_dirs.add(fp_dir)
1273 file_mode = ti.mode & 0o700
1274 if read_only:
1275 # Enforce read-only if the root bundle does.
1276 file_mode &= 0o500
1277 putfile(ifd, fp, file_mode, ti.size)
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001278
Takuto Ikuta523c6472019-09-18 02:53:34 +00001279 else:
1280 raise isolated_format.IsolatedError('Unknown file type %r' % filetype)
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001281
1282
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001283def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks,
1284 filter_cb=None):
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001285 """Aggressively downloads the .isolated file(s), then downloads all the files.
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001286
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001287 Arguments:
1288 isolated_hash: hash of the root *.isolated file.
1289 storage: Storage class that communicates with isolate storage.
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001290 cache: ContentAddressedCache class that knows how to store and map files
1291 locally.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001292 outdir: Output directory to map file tree to.
maruel4409e302016-07-19 14:25:51 -07001293 use_symlinks: Use symlinks instead of hardlinks when True.
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001294 filter_cb: filter that acts as a whitelist for downloaded files.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001295
1296 Returns:
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001297 IsolatedBundle object that holds details about loaded *.isolated file.
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001298 """
Marc-Antoine Ruel4e8cd182014-06-18 13:27:17 -04001299 logging.debug(
maruel4409e302016-07-19 14:25:51 -07001300 'fetch_isolated(%s, %s, %s, %s, %s)',
1301 isolated_hash, storage, cache, outdir, use_symlinks)
Vadim Shtayurae0ab1902014-04-29 10:55:27 -07001302 # Hash algorithm to use, defined by the namespace |storage| is using.
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001303 algo = storage.server_ref.hash_algo
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001304 fetch_queue = FetchQueue(storage, cache)
Takuto Ikuta1e6072c2018-11-06 20:42:43 +00001305 bundle = IsolatedBundle(filter_cb)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001306
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001307 with tools.Profiler('GetIsolateds'):
1308 # Optionally support local files by manually adding them to cache.
1309 if not isolated_format.is_valid_hash(isolated_hash, algo):
1310 logging.debug('%s is not a valid hash, assuming a file '
1311 '(algo was %s, hash size was %d)',
1312 isolated_hash, algo(), algo().digest_size)
Takuto Ikuta6e2ff962019-10-29 12:35:27 +00001313 path = six.text_type(os.path.abspath(isolated_hash))
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001314 try:
1315 isolated_hash = fetch_queue.inject_local_file(path, algo)
1316 except IOError as e:
1317 raise isolated_format.MappingError(
1318 '%s doesn\'t seem to be a valid file. Did you intend to pass a '
1319 'valid hash (error: %s)?' % (isolated_hash, e))
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001320
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001321 # Load all *.isolated and start loading rest of the files.
1322 bundle.fetch(fetch_queue, isolated_hash, algo)
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001323
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001324 with tools.Profiler('GetRest'):
1325 # Create file system hierarchy.
1326 file_path.ensure_tree(outdir)
1327 create_directories(outdir, bundle.files)
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +00001328 _create_symlinks(outdir, bundle.files.items())
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001329
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001330 # Ensure working directory exists.
1331 cwd = os.path.normpath(os.path.join(outdir, bundle.relative_cwd))
1332 file_path.ensure_tree(cwd)
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001333
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001334 # Multimap: digest -> list of pairs (path, props).
1335 remaining = {}
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +00001336 for filepath, props in bundle.files.items():
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001337 if 'h' in props:
1338 remaining.setdefault(props['h'], []).append((filepath, props))
1339 fetch_queue.wait_on(props['h'])
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001340
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001341 # Now block on the remaining files to be downloaded and mapped.
1342 logging.info('Retrieving remaining files (%d of them)...',
1343 fetch_queue.pending_count)
1344 last_update = time.time()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001345
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001346 with threading_utils.ThreadPool(2, 32, 32) as putfile_thread_pool:
1347 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
1348 while remaining:
1349 detector.ping()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001350
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001351 # Wait for any item to finish fetching to cache.
1352 digest = fetch_queue.wait()
tansell9e04a8d2016-07-28 09:31:59 -07001353
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001354 # Create the files in the destination using item in cache as the
1355 # source.
1356 for filepath, props in remaining.pop(digest):
1357 fullpath = os.path.join(outdir, filepath)
tanselle4288c32016-07-28 09:45:40 -07001358
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001359 putfile_thread_pool.add_task(threading_utils.PRIORITY_HIGH,
1360 _map_file, fullpath, digest,
1361 props, cache, bundle.read_only,
1362 use_symlinks)
tanselle4288c32016-07-28 09:45:40 -07001363
Takuto Ikutadeba39d2019-04-04 12:18:39 +00001364 # Report progress.
1365 duration = time.time() - last_update
1366 if duration > DELAY_BETWEEN_UPDATES_IN_SECS:
1367 msg = '%d files remaining...' % len(remaining)
1368 sys.stdout.write(msg + '\n')
1369 sys.stdout.flush()
1370 logging.info(msg)
1371 last_update = time.time()
1372 assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
1373 putfile_thread_pool.join()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001374
Marc-Antoine Ruele9558372018-08-03 03:41:22 +00001375 # Save the cache right away so as not to lose the state of the new objects.
1376 cache.save()
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001377 # The cache could evict items we just tried to fetch; if so, it's a fatal error.
1378 if not fetch_queue.verify_all_cached():
Marc-Antoine Rueldddf6172018-01-23 14:25:43 -05001379 free_disk = file_path.get_free_space(cache.cache_dir)
1380 msg = (
1381 'Cache is too small to hold all requested files.\n'
1382 ' %s\n cache=%d bytes, %d items; %s bytes free') % (
Marc-Antoine Ruel5d7606b2018-06-15 19:06:12 +00001383 cache.policies, cache.total_size, len(cache), free_disk)
Marc-Antoine Rueldddf6172018-01-23 14:25:43 -05001384 raise isolated_format.MappingError(msg)
Vadim Shtayura7f7459c2014-09-04 13:25:10 -07001385 return bundle
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001386
1387
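# A sketch of calling fetch_isolated() directly; CMDdownload below does the
# same wiring. The server URL, hash, output directory and in-memory cache are
# placeholders:
#
#   server_ref = isolate_storage.ServerRef(
#       'https://isolate.example.com', 'default-gzip')
#   with get_storage(server_ref) as storage:
#     bundle = fetch_isolated(
#         isolated_hash=u'deadbeef' * 5,
#         storage=storage,
#         cache=local_caching.MemoryContentAddressedCache(),
#         outdir=u'/tmp/out',
#         use_symlinks=False)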
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +00001388def _directory_to_metadata(root, algo, blacklist):
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001389 """Yields every file and/or symlink found.
1390
1391 Yields:
1392 tuple(FileItem, relpath, metadata)
1393 For a symlink, FileItem is None.
1394 """
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001395 # Current tar file bundle, if any.
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001396 root = file_path.get_native_path_case(root)
Marc-Antoine Ruel440eee62018-12-04 22:37:05 +00001397 bundle = TarBundle(root, algo)
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001398 for relpath, issymlink in isolated_format.expand_directory_and_symlink(
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001399 root,
1400 u'.' + os.path.sep,
1401 blacklist,
1402 follow_symlinks=(sys.platform != 'win32')):
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001403
1404 filepath = os.path.join(root, relpath)
1405 if issymlink:
Marc-Antoine Ruel440eee62018-12-04 22:37:05 +00001406 # TODO(maruel): Do not call this.
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001407 meta = isolated_format.file_to_metadata(filepath, 0, False)
1408 yield None, relpath, meta
1409 continue
1410
1411 prio = relpath.endswith('.isolated')
Marc-Antoine Ruel440eee62018-12-04 22:37:05 +00001412 if bundle.try_add(FileItem(path=filepath, algo=algo, high_priority=prio)):
1413 # The file was added to the current pending tarball and won't be archived
1414 # individually.
1415 continue
1416
1417 # Flush and reset the bundle.
1418 for i, p, m in bundle.yield_item_path_meta():
1419 yield i, p, m
1420 bundle = TarBundle(root, algo)
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001421
1422 # Yield the file individually.
1423 item = FileItem(path=filepath, algo=algo, size=None, high_priority=prio)
1424 yield item, relpath, item.meta
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001425
Marc-Antoine Ruel440eee62018-12-04 22:37:05 +00001426 for i, p, m in bundle.yield_item_path_meta():
1427 yield i, p, m
1428
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001429
Marc-Antoine Ruelb69069b2018-11-28 20:50:40 +00001430def _print_upload_stats(items, missing):
1431 """Prints upload stats."""
1432 total = len(items)
1433 total_size = sum(f.size for f in items)
1434 logging.info(
1435 'Total: %6d, %9.1fkiB', total, total_size / 1024.)
1436 cache_hit = set(items).difference(missing)
1437 cache_hit_size = sum(f.size for f in cache_hit)
1438 logging.info(
1439 'cache hit: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
1440 len(cache_hit),
1441 cache_hit_size / 1024.,
1442 len(cache_hit) * 100. / total,
1443 cache_hit_size * 100. / total_size if total_size else 0)
1444 cache_miss = missing
1445 cache_miss_size = sum(f.size for f in cache_miss)
1446 logging.info(
1447 'cache miss: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
1448 len(cache_miss),
1449 cache_miss_size / 1024.,
1450 len(cache_miss) * 100. / total,
1451 cache_miss_size * 100. / total_size if total_size else 0)
1452
1453
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001454def _enqueue_dir(dirpath, blacklist, hash_algo, hash_algo_name):
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001455 """Called by archive_files_to_storage for a directory.
1456
1457 Creates an .isolated file describing the directory's contents.
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001458
1459 Yields:
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001460 FileItem for every file found, plus one for the .isolated file itself.
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001461 """
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001462 files = {}
1463 for item, relpath, meta in _directory_to_metadata(
1464 dirpath, hash_algo, blacklist):
Marc-Antoine Ruel9cd5ef02018-11-29 23:47:34 +00001465 # item is None for a symlink.
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001466 files[relpath] = meta
Marc-Antoine Ruel9cd5ef02018-11-29 23:47:34 +00001467 if item:
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001468 yield item
1469
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001470 # TODO(maruel): If there's no file, don't yield an .isolated file.
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001471 data = {
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001472 'algo': hash_algo_name,
1473 'files': files,
1474 'version': isolated_format.ISOLATED_FILE_VERSION,
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001475 }
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001476 # Keep the file in memory. This is fine because .isolated files are relatively
1477 # small.
1478 yield BufferItem(
1479 tools.format_json(data, True), algo=hash_algo, high_priority=True)
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001480
1481
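# For reference, the in-memory .isolated document built by _enqueue_dir() has
# roughly this shape (a sketch; the algo name depends on the server
# namespace):
#
#   {
#     'algo': 'sha-1',
#     'files': {u'relative/path': {'h': u'<hex digest>', 's': 1234}},
#     'version': isolated_format.ISOLATED_FILE_VERSION,
#   }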
Vadim Shtayurae0ab1902014-04-29 10:55:27 -07001482def archive_files_to_storage(storage, files, blacklist):
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001483 """Stores every entry into remote storage and returns stats.
Marc-Antoine Ruel2283ad12014-02-09 11:14:57 -05001484
1485 Arguments:
1486 storage: a Storage object that communicates with the remote object store.
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001487 files: iterable of files to upload. If a directory is specified, a
1488 .isolated file is created for it and its hash is returned.
1489 Duplicates are skipped.
Marc-Antoine Ruel2283ad12014-02-09 11:14:57 -05001490 blacklist: function that returns True if a file should be omitted.
maruel064c0a32016-04-05 11:47:15 -07001491
1492 Returns:
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001493 tuple(OrderedDict(path: hash), list(FileItem cold), list(FileItem hot)).
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001494 The first file in the first item is always the .isolated file.
Marc-Antoine Ruel2283ad12014-02-09 11:14:57 -05001495 """
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001496 # Dict of path to hash.
1497 results = collections.OrderedDict()
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001498 hash_algo = storage.server_ref.hash_algo
1499 hash_algo_name = storage.server_ref.hash_algo_name
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001500 # Generator of FileItem to pass to upload_items() concurrent operation.
1501 channel = threading_utils.TaskChannel()
1502 uploaded_digests = set()
1503 def _upload_items():
1504 results = storage.upload_items(channel)
1505 uploaded_digests.update(f.digest for f in results)
1506 t = threading.Thread(target=_upload_items)
1507 t.start()
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001508
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001509 # Keep track locally of the items to determine cold and hot items.
1510 items_found = []
1511 try:
1512 for f in files:
Takuto Ikuta95459dd2019-10-29 12:39:47 +00001513 assert isinstance(f, six.text_type), repr(f)
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001514 if f in results:
1515 # Duplicate
1516 continue
1517 try:
1518 filepath = os.path.abspath(f)
1519 if fs.isdir(filepath):
1520 # Uploading a whole directory.
1521 item = None
1522 for item in _enqueue_dir(
1523 filepath, blacklist, hash_algo, hash_algo_name):
Marc-Antoine Ruelcc802b02018-11-28 21:05:01 +00001524 channel.send_result(item)
1525 items_found.append(item)
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001526 # The very last item will be the .isolated file.
1527 if not item:
1528 # There was no file in the directory.
1529 continue
1530 elif fs.isfile(filepath):
1531 item = FileItem(
1532 path=filepath,
1533 algo=hash_algo,
1534 size=None,
1535 high_priority=f.endswith('.isolated'))
1536 channel.send_result(item)
1537 items_found.append(item)
1538 else:
1539 raise Error('%s is neither a file nor a directory.' % f)
1540 results[f] = item.digest
1541 except OSError:
1542 raise Error('Failed to process %s.' % f)
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001543 finally:
Marc-Antoine Ruel1b2885d2018-12-04 18:30:33 +00001544 # Stops the generator, so _upload_items() can exit.
1545 channel.send_done()
1546 t.join()
1547
1548 cold = []
1549 hot = []
1550 for i in items_found:
1551 # Note that multiple FileItems may have the same .digest.
1552 if i.digest in uploaded_digests:
1553 cold.append(i)
1554 else:
1555 hot.append(i)
1556 return results, cold, hot
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001557
1558
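# A sketch of archiving a directory programmatically; CMDarchive below is the
# command-line equivalent. The directory path is a placeholder:
#
#   blacklist = tools.gen_blacklist(DEFAULT_BLACKLIST)
#   with get_storage(server_ref) as storage:
#     results, cold, hot = archive_files_to_storage(
#         storage, [u'/path/to/dir'], blacklist)
#   # results maps each input path to its digest; for a directory that is the
#   # digest of the generated .isolated file.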
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001559@subcommand.usage('<file1..fileN> or - to read from stdin')
1560def CMDarchive(parser, args):
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001561 """Archives data to the server.
1562
1563 If a directory is specified, a .isolated file is created and the whole
1564 directory is uploaded. Then this .isolated file can be included in another one to run
1565 commands.
1566
1567 The command outputs each file that was processed with its content hash. For
1568 directories, the .isolated generated for the directory is listed as the
1569 directory entry itself.
1570 """
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001571 add_isolate_server_options(parser)
Marc-Antoine Ruel1f8ba352014-11-04 15:55:03 -05001572 add_archive_options(parser)
maruel@chromium.orgcb3c3d52013-03-14 18:55:30 +00001573 options, files = parser.parse_args(args)
nodir55be77b2016-05-03 09:39:57 -07001574 process_isolate_server_options(parser, options, True, True)
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001575 server_ref = isolate_storage.ServerRef(
1576 options.isolate_server, options.namespace)
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001577 if files == ['-']:
1578 files = (l.rstrip('\n\r') for l in sys.stdin)
1579 if not files:
1580 parser.error('Nothing to upload')
1581 files = (f.decode('utf-8') for f in files)
1582 blacklist = tools.gen_blacklist(options.blacklist)
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001583 try:
Marc-Antoine Rueld0868ec2018-11-28 20:47:29 +00001584 with get_storage(server_ref) as storage:
1585 results, _cold, _hot = archive_files_to_storage(storage, files, blacklist)
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001586 except (Error, local_caching.NoMoreSpace) as e:
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001587 parser.error(e.args[0])
Marc-Antoine Ruel04903a32019-10-09 21:09:25 +00001588 print('\n'.join('%s %s' % (h, f) for f, h in results.items()))
Marc-Antoine Ruelfcc3cd82013-11-19 16:31:38 -05001589 return 0
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001590
1591
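# Example invocation (sketch; the server URL is a placeholder and
# "isolateserver.py" stands for however this module is invoked):
#
#   isolateserver.py archive -I https://isolate.example.com out/Release/foo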
1592def CMDdownload(parser, args):
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001593 """Download data from the server.
1594
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001595 It can either download individual files or a complete tree from a .isolated
1596 file.
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001597 """
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001598 add_isolate_server_options(parser)
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001599 parser.add_option(
Marc-Antoine Ruel185ded42015-01-28 20:49:18 -05001600 '-s', '--isolated', metavar='HASH',
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001601 help='hash of an isolated file, .isolated file content is discarded, use '
1602 '--file if you need it')
1603 parser.add_option(
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001604 '-f', '--file', metavar='HASH DEST', default=[], action='append', nargs=2,
1605 help='hash and destination of a file, can be used multiple times')
1606 parser.add_option(
Marc-Antoine Ruelf90861c2015-03-24 20:54:49 -04001607 '-t', '--target', metavar='DIR', default='download',
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001608 help='destination directory')
maruel4409e302016-07-19 14:25:51 -07001609 parser.add_option(
1610 '--use-symlinks', action='store_true',
1611 help='Use symlinks instead of hardlinks')
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001612 add_cache_options(parser)
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001613 options, args = parser.parse_args(args)
1614 if args:
1615 parser.error('Unsupported arguments: %s' % args)
Marc-Antoine Ruel5028ba22017-08-25 17:37:51 -04001616 if not file_path.enable_symlink():
Marc-Antoine Ruel5a024272019-01-15 20:11:16 +00001617 logging.warning('Symlink support is not enabled')
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001618
nodir55be77b2016-05-03 09:39:57 -07001619 process_isolate_server_options(parser, options, True, True)
maruel@chromium.org4f2ebe42013-09-19 13:09:08 +00001620 if bool(options.isolated) == bool(options.file):
1621 parser.error('Use one of --isolated or --file, and only one.')
maruel4409e302016-07-19 14:25:51 -07001622 if not options.cache and options.use_symlinks:
1623 parser.error('--use-symlinks requires the use of a cache with --cache')
maruel@chromium.orgb7e79a22013-09-13 01:24:56 +00001624
John Abd-El-Maleke3a85012018-05-29 20:10:44 -07001625 cache = process_cache_options(options, trim=True)
maruel2e8d0f52016-07-16 07:51:29 -07001626 cache.cleanup()
Takuto Ikuta6e2ff962019-10-29 12:35:27 +00001627 options.target = six.text_type(os.path.abspath(options.target))
Marc-Antoine Ruelf90861c2015-03-24 20:54:49 -04001628 if options.isolated:
maruel12e30012015-10-09 11:55:35 -07001629 if (fs.isfile(options.target) or
1630 (fs.isdir(options.target) and fs.listdir(options.target))):
Marc-Antoine Ruelf90861c2015-03-24 20:54:49 -04001631 parser.error(
1632 '--target \'%s\' exists, please use another target' % options.target)
Marc-Antoine Ruelb8513132018-11-20 19:48:53 +00001633 server_ref = isolate_storage.ServerRef(
1634 options.isolate_server, options.namespace)
1635 with get_storage(server_ref) as storage:
Vadim Shtayura3172be52013-12-03 12:49:05 -08001636 # Fetching individual files.
1637 if options.file:
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001638 # TODO(maruel): Enable cache in this case too.
Vadim Shtayura3172be52013-12-03 12:49:05 -08001639 channel = threading_utils.TaskChannel()
1640 pending = {}
1641 for digest, dest in options.file:
Takuto Ikuta6e2ff962019-10-29 12:35:27 +00001642 dest = six.text_type(dest)
Vadim Shtayura3172be52013-12-03 12:49:05 -08001643 pending[digest] = dest
1644 storage.async_fetch(
1645 channel,
Vadim Shtayura3148e072014-09-02 18:51:52 -07001646 threading_utils.PRIORITY_MED,
Vadim Shtayura3172be52013-12-03 12:49:05 -08001647 digest,
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001648 local_caching.UNKNOWN_FILE_SIZE,
1649 functools.partial(
1650 local_caching.file_write, os.path.join(options.target, dest)))
Vadim Shtayura3172be52013-12-03 12:49:05 -08001651 while pending:
Marc-Antoine Ruel4494b6c2018-11-28 21:00:41 +00001652 fetched = channel.next()
Vadim Shtayura3172be52013-12-03 12:49:05 -08001653 dest = pending.pop(fetched)
1654 logging.info('%s: %s', fetched, dest)
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001655
Vadim Shtayura3172be52013-12-03 12:49:05 -08001656 # Fetching whole isolated tree.
1657 if options.isolated:
Marc-Antoine Ruele79ddbf2018-06-13 18:33:07 +00001658 bundle = fetch_isolated(
1659 isolated_hash=options.isolated,
1660 storage=storage,
1661 cache=cache,
1662 outdir=options.target,
1663 use_symlinks=options.use_symlinks)
1664 cache.trim()
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001665 if bundle.command:
1666 rel = os.path.join(options.target, bundle.relative_cwd)
1667 print('To run this test please run from the directory %s:' %
1668 rel)
1669 print(' ' + ' '.join(bundle.command))
vadimsh@chromium.org7b5dae32013-10-03 16:59:59 +00001670
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001671 return 0
1672
1673
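# Example invocations (sketch; hashes, paths and the server URL are
# placeholders, and "isolateserver.py" stands for however this module is
# invoked):
#
#   Fetch a whole tree described by a .isolated file:
#     isolateserver.py download -I https://isolate.example.com \
#         -s <isolated_hash> -t ./out --cache ./cache
#   Fetch individual files:
#     isolateserver.py download -I https://isolate.example.com \
#         -f <hash> dest_name.bin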
Marc-Antoine Ruel1f8ba352014-11-04 15:55:03 -05001674def add_archive_options(parser):
1675 parser.add_option(
1676 '--blacklist',
1677 action='append', default=list(DEFAULT_BLACKLIST),
1678 help='List of regexp to use as blacklist filter when uploading '
1679 'directories')
1680
1681
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001682def add_isolate_server_options(parser):
1683 """Adds --isolate-server and --namespace options to parser."""
Marc-Antoine Ruel1687b5e2014-02-06 17:47:53 -05001684 parser.add_option(
1685 '-I', '--isolate-server',
1686 metavar='URL', default=os.environ.get('ISOLATE_SERVER', ''),
Marc-Antoine Ruel8806e622014-02-12 14:15:53 -05001687 help='URL of the Isolate Server to use. Defaults to the environment '
1688 'variable ISOLATE_SERVER if set. No need to specify https://, this '
1689 'is assumed.')
Marc-Antoine Ruel1687b5e2014-02-06 17:47:53 -05001690 parser.add_option(
aludwind7b7b7e2017-06-29 16:38:50 -07001691 '--grpc-proxy', help='gRPC proxy by which to communicate to Isolate')
aludwin81178302016-11-30 17:18:49 -08001692 parser.add_option(
Marc-Antoine Ruel1687b5e2014-02-06 17:47:53 -05001693 '--namespace', default='default-gzip',
1694 help='The namespace to use on the Isolate Server, default: %default')
1695
1696
nodir55be77b2016-05-03 09:39:57 -07001697def process_isolate_server_options(
1698 parser, options, set_exception_handler, required):
1699 """Processes the --isolate-server option.
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001700
1701 Returns the identity as determined by the server.
Marc-Antoine Ruel1687b5e2014-02-06 17:47:53 -05001702 """
1703 if not options.isolate_server:
nodir55be77b2016-05-03 09:39:57 -07001704 if required:
1705 parser.error('--isolate-server is required.')
1706 return
1707
aludwind7b7b7e2017-06-29 16:38:50 -07001708 if options.grpc_proxy:
1709 isolate_storage.set_grpc_proxy(options.grpc_proxy)
aludwin81178302016-11-30 17:18:49 -08001710 else:
1711 try:
1712 options.isolate_server = net.fix_url(options.isolate_server)
1713 except ValueError as e:
1714 parser.error('--isolate-server %s' % e)
Marc-Antoine Ruele290ada2014-12-10 19:48:49 -05001715 if set_exception_handler:
1716 on_error.report_on_exception_exit(options.isolate_server)
Marc-Antoine Ruelf7d737d2014-12-10 15:36:29 -05001717 try:
1718 return auth.ensure_logged_in(options.isolate_server)
1719 except ValueError as e:
1720 parser.error(str(e))
Marc-Antoine Ruel793bff32019-04-18 17:50:48 +00001721 return None
Marc-Antoine Ruel8806e622014-02-12 14:15:53 -05001722
Marc-Antoine Ruel1687b5e2014-02-06 17:47:53 -05001723
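# Typical wiring in a subcommand, mirroring CMDarchive and CMDdownload above:
#
#   add_isolate_server_options(parser)
#   options, args = parser.parse_args(args)
#   process_isolate_server_options(parser, options, True, True)
#   server_ref = isolate_storage.ServerRef(
#       options.isolate_server, options.namespace)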
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001724def add_cache_options(parser):
1725 cache_group = optparse.OptionGroup(parser, 'Cache management')
1726 cache_group.add_option(
Marc-Antoine Ruel5aeb3bb2018-06-16 13:11:02 +00001727 '--cache', metavar='DIR', default='cache',
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001728 help='Directory to keep a local cache of the files. Accelerates download '
1729 'by reusing already downloaded files. Default=%default')
1730 cache_group.add_option(
1731 '--max-cache-size',
1732 type='int',
1733 metavar='NNN',
maruel71586102016-01-29 11:44:09 -08001734 default=50*1024*1024*1024,
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001735 help='Trim if the cache gets larger than this value, default=%default')
1736 cache_group.add_option(
1737 '--min-free-space',
1738 type='int',
1739 metavar='NNN',
1740 default=2*1024*1024*1024,
1741 help='Trim if disk free space becomes lower than this value, '
1742 'default=%default')
1743 cache_group.add_option(
1744 '--max-items',
1745 type='int',
1746 metavar='NNN',
1747 default=100000,
1748 help='Trim if more than this number of items are in the cache, '
1749 'default=%default')
1750 parser.add_option_group(cache_group)
1751
1752
John Abd-El-Maleke3a85012018-05-29 20:10:44 -07001753def process_cache_options(options, trim, **kwargs):
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001754 if options.cache:
Marc-Antoine Ruel34f5f282018-05-16 16:04:31 -04001755 policies = local_caching.CachePolicies(
1756 options.max_cache_size,
1757 options.min_free_space,
1758 options.max_items,
1759 # 3 weeks.
1760 max_age_secs=21*24*60*60)
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001761
Marc-Antoine Ruel2666d9c2018-05-18 13:52:02 -04001762 # |options.cache| path may not exist until DiskContentAddressedCache()
1763 # instance is created.
1764 return local_caching.DiskContentAddressedCache(
Takuto Ikuta6e2ff962019-10-29 12:35:27 +00001765 six.text_type(os.path.abspath(options.cache)), policies, trim, **kwargs)
Marc-Antoine Ruel793bff32019-04-18 17:50:48 +00001766 return local_caching.MemoryContentAddressedCache()
Marc-Antoine Ruela57d7db2014-10-15 20:31:19 -04001767
1768
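# A sketch of wiring the cache options into a plain optparse parser; the
# argument values are illustrative:
#
#   parser = optparse.OptionParser()
#   add_cache_options(parser)
#   options, _ = parser.parse_args(['--cache', 'cache'])
#   cache = process_cache_options(options, trim=True)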
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001769class OptionParserIsolateServer(logging_utils.OptionParserWithLogging):
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001770 def __init__(self, **kwargs):
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001771 logging_utils.OptionParserWithLogging.__init__(
Marc-Antoine Ruelac54cb42013-11-18 14:05:35 -05001772 self,
1773 version=__version__,
1774 prog=os.path.basename(sys.modules[__name__].__file__),
1775 **kwargs)
Vadim Shtayurae34e13a2014-02-02 11:23:26 -08001776 auth.add_auth_options(self)
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001777
1778 def parse_args(self, *args, **kwargs):
Marc-Antoine Ruelf74cffe2015-07-15 15:21:34 -04001779 options, args = logging_utils.OptionParserWithLogging.parse_args(
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001780 self, *args, **kwargs)
Vadim Shtayura5d1efce2014-02-04 10:55:43 -08001781 auth.process_auth_options(self, options)
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001782 return options, args
1783
1784
1785def main(args):
1786 dispatcher = subcommand.CommandDispatcher(__name__)
Marc-Antoine Ruelcfb60852014-07-02 15:22:00 -04001787 return dispatcher.execute(OptionParserIsolateServer(), args)
maruel@chromium.orgc6f90062012-11-07 18:32:22 +00001788
1789
1790if __name__ == '__main__':
maruel8e4e40c2016-05-30 06:21:07 -07001791 subprocess42.inhibit_os_error_reporting()
maruel@chromium.orgfb78d432013-08-28 21:22:40 +00001792 fix_encoding.fix_encoding()
1793 tools.disable_buffering()
1794 colorama.init()
maruel@chromium.orgcb3c3d52013-03-14 18:55:30 +00001795 sys.exit(main(sys.argv[1:]))