"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

import getpass, os, sys, re, stat, tempfile, time, select, subprocess
import traceback, shutil, warnings, fcntl, pickle, logging
from autotest_lib.client.bin import sysinfo
from autotest_lib.client.common_lib import error, log, utils, packages
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.server import test, subcommand, profilers
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils


def _control_segment_path(name):
    """Get the pathname of the named control segment file."""
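    # For example (illustrative), _control_segment_path('verify') resolves to
    # <directory of this file>/control_segments/verify.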
    server_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(server_dir, "control_segments", name)


CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')


# by default provide a stub that generates no site data
def _get_site_job_data_dummy(job):
    return {}


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)

class base_server_job(object):
    """
    The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
            drop_caches_between_iterations
                    drop the pagecache between each iteration
    """

    STATUS_VERSION = 1
    test_tag = None

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name=''):
        """
        Create a server side job object.

        @param control The pathname of the control file.
        @param args Passed to the control file.
        @param resultdir Where to throw the results.
        @param label Description of the job.
        @param user Username for the job (email address).
        @param machines A list of hostnames of the machines to run the job on.
        @param client True if this is a client-side control file.
        @param parse_job bool, whether to run the results through the TKO
                parser.
        @param ssh_user The SSH username.  [root]
        @param ssh_port The SSH port number.  [22]
        @param ssh_pass The SSH passphrase, if needed.
        @param group_name If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
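
        Example (illustrative values only; any real control file path,
        result directory and hostname will do):
            job = server_job(control='/path/to/control.srv', args='',
                             resultdir='/path/to/results', label='smoke',
                             user='user@example.com', machines=['host1'])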
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            self.uncollected_log_file = os.path.join(resultdir,
                                                     'uncollected_logs')
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the directory
                # already existing, the chmod below will raise an
                # exception as well.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                                              run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()


    @staticmethod
    def _load_control_file(path):
        f = open(path)
        try:
            control_file = f.read()
        finally:
            f.close()
        return re.sub('\r', '', control_file)


    def _register_subcommand_hooks(self):
        """
        Register some hooks into the subcommand modules that allow us
        to properly clean up self.hosts created in forked subprocesses.
        """
        def on_fork(cmd):
            self._existing_hosts_on_fork = set(self.hosts)
        def on_join(cmd):
            new_hosts = self.hosts - self._existing_hosts_on_fork
            for host in new_hosts:
                host.close()
        subcommand.subcommand.register_fork_hook(on_fork)
        subcommand.subcommand.register_join_hook(on_join)

    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.
        """
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """
        This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db).
        """
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.__insert_test(test)
        self.using_parser = False

    def verify(self):
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self.ssh_user,
                         'ssh_port' : self.ssh_port,
                         'ssh_pass' : self.ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise


    def repair(self, host_protection):
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
                     'ssh_pass': self.ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)


    def precheck(self):
        """
        Perform any additional checks in derived classes.
        """
        pass


    def enable_external_logging(self):
        """
        Start or restart external logging mechanism.
        """
        pass


    def disable_external_logging(self):
        """
        Pause or stop external logging mechanism.
        """
        pass

    def enable_test_cleanup(self):
        """
        Make tests run their test.cleanup method after they finish
        (this is the default).
        """
        self.run_test_cleanup = True


    def disable_test_cleanup(self):
        """
        Prevent tests from running their test.cleanup method after they
        finish.
        """
        self.run_test_cleanup = False


    def use_external_logging(self):
        """
        Return True if external logging should be used.
        """
        return False

    def parallel_simple(self, function, machines, log=True, timeout=None):
        """
        Run 'function' using parallel_simple, with an extra wrapper to handle
        the necessary setup for continuous parsing, if possible. If continuous
        parsing is already properly initialized then this should just work.
        """
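        # Typical use from a server control file (illustrative; run_client is
        # a user-supplied function taking a single hostname argument):
        #     job.parallel_simple(run_client, machines)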
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)


    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
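        # Overall flow (summary of the code below): make sure the
        # uncollected-logs file exists, build the control file namespace,
        # optionally install the machines, execute the server control file
        # (or the client wrapper), and finally collect crashdumps/crashinfo
        # and run the cleanup/install segments.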
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                self._execute_code(server_control_file, namespace)

            # no error occurred, so we don't need to collect crashinfo
            collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    print 'Error %s removing dir %s' % (e,
                                                        temp_control_file_dir)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)


    def set_test_tag(self, tag=''):
        """Set tag to be added to test name of all following run_test steps."""
        self.test_tag = tag


    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run
        """
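        # For example (illustrative), job.run_test('sleeptest', seconds=30,
        # tag='quick') records its results under <resultdir>/sleeptest.quick/.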

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self.test_tag
        if tag:
            testname += '.' + str(tag)
        subdir = testname

        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True


    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info


    def run_group(self, function, *args, **dargs):
        """\
        function:
                subroutine to run
        *args:
                arguments for the function
        """

        name = function.__name__

        # Allow the tag for the group to be specified.
        tag = dargs.pop('tag', None)
        if tag:
            name = tag

        return self._run_group(name, None, function, *args, **dargs)[0]


    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
                         representing the kernel version.
        """
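        # Typical use (illustrative; 'host' is an existing host object):
        #     job.run_reboot(host.reboot,
        #                    lambda: host.run('uname -r').stdout.strip())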

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})


    def run_control(self, path):
        """Execute a control file found at path (relative to the autotest
        path). Intended for executing a control file within a control file,
        not for running the top-level job control file."""
        path = os.path.join(self.autodir, path)
        control_file = self._load_control_file(path)
        self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)


    def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
                                   on_every_test)


    def add_sysinfo_logfile(self, file, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)


    def _add_sysinfo_loggable(self, loggable, on_every_test):
        if on_every_test:
            self.sysinfo.test_loggables.add(loggable)
        else:
            self.sysinfo.boot_loggables.add(loggable)


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status.

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
                or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and are
        governed by self.record_prefix.

        Multiline messages have secondary lines prefaced by a double
        space ('  ').

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
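        # For example (illustrative timestamps), calling
        #     job.record('GOOD', 'dbench', 'dbench', 'completed successfully')
        # appends a tab-separated line of the form
        #     GOOD<TAB>dbench<TAB>dbench<TAB>timestamp=...<TAB>localtime=...<TAB>completed successfully
        # to status.log; the timestamp fields are added by _render_record().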
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self.record_prefix
        try:
            if status_code.startswith("END "):
                self.record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self.record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)


    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
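        # Each logger is expected to emit tab-separated lines of the form
        # (illustrative): "<epoch timestamp>\t<warning type>\t<message>",
        # which is what the split('\t', 2) below relies on.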
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def disable_warnings(self, warning_type):
        self.warning_manager.disable_warnings(warning_type)
        self.record("INFO", None, None,
                    "disabling %s warnings" % warning_type,
                    {"warnings.disable": warning_type})


    def enable_warnings(self, warning_type):
        self.warning_manager.enable_warnings(warning_type)
        self.record("INFO", None, None,
                    "enabling %s warnings" % warning_type,
                    {"warnings.enable": warning_type})


    def get_status_log_path(self, subdir=None):
        """Return the path to the job status log.

        @param subdir - Optional parameter indicating that you want the path
            to a subdirectory status log.

        @returns The path where the status log should be.
        """
        if self.resultdir:
            if subdir:
                return os.path.join(self.resultdir, subdir, "status.log")
            else:
                return os.path.join(self.resultdir, "status.log")
        else:
            return None


    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        @param update_func - a function that updates the list of uncollected
            logs. Should take one parameter, the list to be updated.
        """
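        # The file holds a pickled list of (hostname, remote_path, local_path)
        # tuples (see add_client_log below); flock() serializes updates coming
        # from forked subcommands.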
        if self.uncollected_log_file:
            log_file = open(self.uncollected_log_file, "r+")
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                log_file.flush()
            finally:
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()


    def add_client_log(self, hostname, remote_path, local_path):
        """Adds a new set of client logs to the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.append((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)


    def remove_client_log(self, hostname, remote_path, local_path):
        """Removes a set of client logs from the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.remove((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)

    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None,
                       optional_fields=None):
        """
        Internal function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            if re.match(r'[\n\t]', subdir):
                raise ValueError('Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not log.is_valid_status(status_code):
            raise ValueError('Invalid status code supplied: %s' % status_code)
        if not operation:
            operation = '----'
        if re.match(r'[\n\t]', operation):
            raise ValueError('Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", "  ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        if not optional_fields:
            optional_fields = {}

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        optional_fields["timestamp"] = str(epoch_time)
        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
                                                     local_time)

        fields = [status_code, substr, operation]
        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
        fields.append(status)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in fields)
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike _record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = self.get_status_log_path()
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)

    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean.  If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name.  If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            module = __import__(module_name, {}, {}, names)

            # No names supplied?  Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with fewer surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrence of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing to be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job.  Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self

    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during execution.
          protect: Boolean.  If True (the default) a copy of the namespace dict
              is used during execution to prevent the code from modifying its
              contents outside of this function.  If False the raw dict is
              passed in and modifications will be allowed.
        """
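        # A control file executed this way can use the injected names
        # directly, e.g. (illustrative):
        #     host = hosts.create_host(machines[0])
        #     host.run('uname -a')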
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        execfile(code_file, namespace, namespace)


    def _record(self, status_code, subdir, operation, status='',
                epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record, since
        calling it directly would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation, status,
                                  epoch_time, optional_fields=optional_fields)

        status_file = self.get_status_log_path()
        sys.stdout.write(msg)
        if status_file:
            open(status_file, "a").write(msg)
        if subdir:
            sub_status_file = self.get_status_log_path(subdir)
            open(sub_status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.__insert_test(test)


    def __insert_test(self, test):
        """
        An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        self.num_tests_run += 1
        if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
            self.num_tests_failed += 1
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            msg = ("WARNING: An unexpected error occurred while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg


site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)


class server_job(site_server_job):
    pass


class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # a map of warning types to a list of disabled time intervals
        self.disabled_warnings = {}
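        # e.g. {"NETWORK": [(1244000000, 1244000300), (1244000900, None)]}
        # where each tuple is (start, end) in epoch seconds and end=None
        # means the warning type is still disabled (illustrative values).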


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occurred and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occurred."""
        disabled_intervals = self.disabled_warnings.get(warning_type, [])
        for start, end in disabled_intervals:
            if timestamp >= start and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))