"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

import getpass, os, sys, re, stat, tempfile, time, select, subprocess
import traceback, shutil, warnings, fcntl, pickle, logging, logging.config
from autotest_lib.client.bin import fd_stack, sysinfo
from autotest_lib.client.common_lib import error, log, utils, packages
from autotest_lib.server import test, subcommand, profilers
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils


def _control_segment_path(name):
    """Get the pathname of the named control segment file."""
    server_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(server_dir, "control_segments", name)


CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')


# by default provide a stub that generates no site data
def _get_site_job_data_dummy(job):
    return {}


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)


class base_server_job(object):
    """
    The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
            drop_caches_between_iterations
                    drop the pagecache between each iteration
    """

    STATUS_VERSION = 1
    WARNING_DISABLE_DELAY = 5
    test_tag = None

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name=''):
        """
        Create a server side job object.

        @param control The pathname of the control file.
        @param args Passed to the control file.
        @param resultdir Where to throw the results.
        @param label Description of the job.
        @param user Username for the job (email address).
        @param client True if this is a client-side control file.
        @param parse_job Whether the results should be run through the
                TKO parser.
        @param ssh_user The SSH username.  [root]
        @param ssh_port The SSH port number.  [22]
        @param ssh_pass The SSH passphrase, if needed.
        @param group_name If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            self.uncollected_log_file = os.path.join(resultdir,
                                                     'uncollected_logs')
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the directory
                # already existing, this chmod should throw an exception
                # as well.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
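        # continuous parsing is only enabled here for single-machine jobs;
        # multi-machine jobs set up per-machine parsers in parallel_simple()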
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                                              run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()


    @staticmethod
    def _load_control_file(path):
        f = open(path)
        try:
            control_file = f.read()
        finally:
            f.close()
        return re.sub('\r', '', control_file)


    def _register_subcommand_hooks(self):
        """
        Register some hooks into the subcommand modules that allow us
        to properly clean up self.hosts created in forked subprocesses.
        """
        def on_fork(cmd):
            self._existing_hosts_on_fork = set(self.hosts)
        def on_join(cmd):
            new_hosts = self.hosts - self._existing_hosts_on_fork
            for host in new_hosts:
                host.close()
        subcommand.subcommand.register_fork_hook(on_fork)
        subcommand.subcommand.register_join_hook(on_join)

    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.
        """
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """
        This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)
        """
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.__insert_test(test)
        self.using_parser = False


    def verify(self):
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self.ssh_user,
                         'ssh_port' : self.ssh_port,
                         'ssh_pass' : self.ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise


    def repair(self, host_protection):
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
                     'ssh_pass': self.ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)


    def precheck(self):
        """
        Perform any additional checks in derived classes.
        """
        pass


    def enable_external_logging(self):
        """
        Start or restart external logging mechanism.
        """
        pass


    def disable_external_logging(self):
        """
        Pause or stop external logging mechanism.
        """
        pass


    def enable_test_cleanup(self):
        """
        Make tests run their test.cleanup phase (this is the default).
        """
        self.run_test_cleanup = True


    def disable_test_cleanup(self):
        """
        Stop tests from running their test.cleanup phase.
        """
        self.run_test_cleanup = False


    def use_external_logging(self):
        """
        Return True if external logging should be used.
        """
        return False


    def parallel_simple(self, function, machines, log=True, timeout=None):
        """
        Run 'function' using parallel_simple, with an extra wrapper to handle
        the necessary setup for continuous parsing, if possible. If continuous
        parsing is already properly initialized then this should just work.
        """
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)


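    # sentinel: pass as control_file_dir to run() to have the control files
    # written to (and cleaned out of) a throwaway temporary directory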
    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

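        # assume the worst: collect full crash info unless the control file
        # below completes without raising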
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                self._execute_code(server_control_file, namespace)

                # no error occurred, so we don't need to collect crashinfo
                collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    print 'Error %s removing dir %s' % (e,
                                                        temp_control_file_dir)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)


    def set_test_tag(self, tag=''):
        """Set tag to be added to test name of all following run_test steps."""
        self.test_tag = tag


    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run
        """

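        # the resolved test name doubles as the results subdirectory
        # (with any tag appended below)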
        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self.test_tag
        if tag:
            testname += '.' + str(tag)
        subdir = testname

        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True


    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info


    def run_group(self, function, *args, **dargs):
        """\
        function:
                subroutine to run
        *args:
                arguments for the function
        """

        name = function.__name__

        # Allow the tag for the group to be specified.
        tag = dargs.pop('tag', None)
        if tag:
            name = tag

        return self._run_group(name, None, function, *args, **dargs)[0]


    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})


    def run_control(self, path):
        """Execute a control file found at path (relative to the autotest
        path). Intended for executing a control file within a control file,
        not for running the top-level job control file."""
        path = os.path.join(self.autodir, path)
        control_file = self._load_control_file(path)
        self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)


    def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
                                   on_every_test)


    def add_sysinfo_logfile(self, file, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)


    def _add_sysinfo_loggable(self, loggable, on_every_test):
        if on_every_test:
            self.sysinfo.test_loggables.add(loggable)
        else:
            self.sysinfo.boot_loggables.add(loggable)


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
                or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

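        For example, a passing dbench test might be recorded as the line
        (fields separated by tabs):

                GOOD\tdbench\tdbench\tcompleted successfully
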
        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping and are
        governed by self.record_prefix

        Multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self.record_prefix
        try:
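            # warnings recorded while a group is ending get one extra level
            # of indentation so they still fall inside the group being closed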
            if status_code.startswith("END "):
                self.record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self.record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)


    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def disable_warnings(self, warning_type):
        self.warning_manager.disable_warnings(warning_type)
        self.record("INFO", None, None,
                    "disabling %s warnings" % warning_type,
                    {"warnings.disable": warning_type})
        time.sleep(self.WARNING_DISABLE_DELAY)


    def enable_warnings(self, warning_type):
        time.sleep(self.WARNING_DISABLE_DELAY)
        self.warning_manager.enable_warnings(warning_type)
        self.record("INFO", None, None,
                    "enabling %s warnings" % warning_type,
                    {"warnings.enable": warning_type})


    def get_status_log_path(self, subdir=None):
        """Return the path to the job status log.

        @param subdir - Optional parameter indicating that you want the path
            to a subdirectory status log.

        @returns The path where the status log should be.
        """
        if self.resultdir:
            if subdir:
                return os.path.join(self.resultdir, subdir, "status.log")
            else:
                return os.path.join(self.resultdir, "status.log")
        else:
            return None


    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        @param update_func - a function that updates the list of uncollected
            logs. Should take one parameter, the list to be updated.
        """
        if self.uncollected_log_file:
            log_file = open(self.uncollected_log_file, "r+")
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                log_file.flush()
            finally:
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()


    def add_client_log(self, hostname, remote_path, local_path):
        """Adds a new set of client logs to the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.append((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)


    def remove_client_log(self, hostname, remote_path, local_path):
        """Removes a set of client logs from the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.remove((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)


    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None,
                       optional_fields=None):
        """
        Internal Function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            if re.match(r'[\n\t]', subdir):
                raise ValueError('Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not log.is_valid_status(status_code):
            raise ValueError('Invalid status code supplied: %s' % status_code)
        if not operation:
            operation = '----'
        if re.match(r'[\n\t]', operation):
            raise ValueError('Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", " ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        if not optional_fields:
            optional_fields = {}

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        optional_fields["timestamp"] = str(epoch_time)
        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
                                                     local_time)

        fields = [status_code, substr, operation]
        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
        fields.append(status)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in fields)
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike _record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = self.get_status_log_path()
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean.  If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name.  If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            module = __import__(module_name, {}, {}, names)

            # No names supplied?  Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with fewer surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrence of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing to be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job.  Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self


    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during execution.
          protect: Boolean.  If True (the default) a copy of the namespace dict
              is used during execution to prevent the code from modifying its
              contents outside of this function.  If False the raw dict is
              passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
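            # for multi-machine jobs, keep a .machines file in the current
            # working directory (normally the results dir) listing every
            # machine this job uses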
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        execfile(code_file, namespace, namespace)


    def _record(self, status_code, subdir, operation, status='',
                epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation, status,
                                  epoch_time, optional_fields=optional_fields)

        status_file = self.get_status_log_path()
        sys.stdout.write(msg)
        if status_file:
            open(status_file, "a").write(msg)
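        # also mirror the record into the subdirectory's own status.log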
        if subdir:
            sub_status_file = self.get_status_log_path(subdir)
            open(sub_status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.__insert_test(test)


    def __insert_test(self, test):
        """
        An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        self.num_tests_run += 1
        if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
            self.num_tests_failed += 1
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            msg = ("WARNING: An unexpected error occurred while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg


site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)

class server_job(site_server_job):
    pass


class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # a map of warning types to a list of disabled time intervals
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occurred and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occurred."""
        disabled_intervals = self.disabled_warnings.get(warning_type, [])
        for start, end in disabled_intervals:
            if timestamp >= start and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
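        # open a new disabled interval only if the last one has been closed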
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))