blob: 946ed3463a3815f9a8c4d9ea07c85faedd9babaf [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showardb18134f2009-03-20 20:52:18 +000010import traceback, shutil, warnings, fcntl, pickle, logging, logging.config
jadmanskic09fc152008-10-15 17:56:59 +000011from autotest_lib.client.bin import fd_stack, sysinfo
mbligh09108442008-10-15 16:27:38 +000012from autotest_lib.client.common_lib import error, log, utils, packages
jadmanski043e1132008-11-19 17:10:32 +000013from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000014from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000015
16
mbligh084bc172008-10-18 14:02:45 +000017def _control_segment_path(name):
18 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000019 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000020 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000021
22
# Filenames used for the control files staged for client and server runs.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Prepackaged control segments implementing the standard job phases.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000035
36
mbligh062ed152009-01-13 00:57:14 +000037# by default provide a stub that generates no site data
38def _get_site_job_data_dummy(job):
39 return {}
40
41
# load up site-specific code for generating site-specific job data;
# falls back to the dummy stub above when no site module is present
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000046
47
class base_server_job(object):
    """
    The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
            drop_caches_between_iterations
                    drop the pagecache between each iteration
    """

    # version of the status log format this job writes
    STATUS_VERSION = 1
    # seconds to pause when disabling/enabling warning categories so that
    # in-flight warnings land on the intended side of the transition
    WARNING_DISABLE_DELAY = 5
    # default tag appended to test names by run_test (see set_test_tag)
    test_tag = None
jadmanski10646442008-08-13 14:05:21 +000074
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass=''):
        """
        Server side job object.

        Parameters:
                control:        The control file (pathname of)
                args:           args to pass to the control file
                resultdir:      where to throw the results
                label:          label for the job
                user:           Username for the job (email address)
                machines:       list of hostnames this job runs against
                client:         True if a client-side control file
                parse_job:      job tag for continuous result parsing
                ssh_user:       username used to ssh to the machines
                ssh_port:       port used to ssh to the machines
                ssh_pass:       password used to ssh to the machines
        """
        # derive the standard autotest directory layout from this file's
        # location (<autodir>/server/...)
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            # read the control file contents up front (CRs stripped)
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            # tracks client logs that still need to be fetched (see
            # add_client_log/remove_client_log)
            self.uncollected_log_file = os.path.join(resultdir, "uncollected_logs")
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        # redirect stdout/stderr through fd stacks so output can be captured
        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        # make sure we have a writable tmpdir, creating it if needed
        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        # if the serverdir tmp is still unusable, fall back to a per-user
        # directory under the system temp dir
        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the
                # directory already existing, this chmod should throw as well
                # exception.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        # continuous parsing is only supported for single-machine jobs here;
        # parallel_simple sets it up per-machine for multi-machine jobs
        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                                             run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()
178
179
jadmanskie432dd22009-01-30 15:04:51 +0000180 @staticmethod
181 def _load_control_file(path):
182 f = open(path)
183 try:
184 control_file = f.read()
185 finally:
186 f.close()
187 return re.sub('\r', '', control_file)
188
189
jadmanski550fdc22008-11-20 16:32:08 +0000190 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000191 """
192 Register some hooks into the subcommand modules that allow us
193 to properly clean up self.hosts created in forked subprocesses.
194 """
jadmanski550fdc22008-11-20 16:32:08 +0000195 def on_fork(cmd):
196 self._existing_hosts_on_fork = set(self.hosts)
197 def on_join(cmd):
198 new_hosts = self.hosts - self._existing_hosts_on_fork
199 for host in new_hosts:
200 host.close()
201 subcommand.subcommand.register_fork_hook(on_fork)
202 subcommand.subcommand.register_join_hook(on_join)
203
jadmanski10646442008-08-13 14:05:21 +0000204
    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        @param resultdir - directory whose status logs should be parsed
        """
        # redirect parser debugging to .parse.log (opened unbuffered)
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            # job already present: attach the existing db indices to the model
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
229
230
231 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000232 """
233 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000234 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000235 remaining test results to the results db)
236 """
jadmanski10646442008-08-13 14:05:21 +0000237 if not self.using_parser:
238 return
239 final_tests = self.parser.end()
240 for test in final_tests:
241 self.__insert_test(test)
242 self.using_parser = False
243
244
    def verify(self):
        """Run the verify control segment against self.machines.

        Raises AutoservError if no machines were given; any failure during
        verification is recorded as an ABORT in the status log and re-raised.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            # expose the job context to the verify control segment
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self.ssh_user,
                         'ssh_port' : self.ssh_port,
                         'ssh_pass' : self.ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            # record the failure before propagating it to the caller
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
260
261
262 def repair(self, host_protection):
263 if not self.machines:
264 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000265 if self.resultdir:
266 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000267 namespace = {'machines': self.machines, 'job': self,
268 'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
269 'ssh_pass': self.ssh_pass,
270 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000271
mbligh0931b0a2009-04-08 17:44:48 +0000272 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000273
274
275 def precheck(self):
276 """
277 perform any additional checks in derived classes.
278 """
279 pass
280
281
282 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000283 """
284 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000285 """
286 pass
287
288
289 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000290 """
291 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000292 """
293 pass
294
295
jadmanski23afbec2008-09-17 18:12:07 +0000296 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000297 """
298 By default tests run test.cleanup
299 """
jadmanski23afbec2008-09-17 18:12:07 +0000300 self.run_test_cleanup = True
301
302
303 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000304 """
305 By default tests do not run test.cleanup
306 """
jadmanski23afbec2008-09-17 18:12:07 +0000307 self.run_test_cleanup = False
308
309
jadmanski10646442008-08-13 14:05:21 +0000310 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000311 """
312 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000313 """
314 return False
315
316
    def parallel_simple(self, function, machines, log=True, timeout=None):
        """
        Run 'function' using parallel_simple, with an extra wrapper to handle
        the necessary setup for continuous parsing, if possible. If continuous
        parsing is already properly initialized then this should just work.

        @param function - callable taking a single machine hostname
        @param machines - list of hostnames to run against
        @param log - whether to set up per-machine result dirs/keyvals
        @param timeout - passed through to subcommand.parallel_simple
        """
        # forking is needed unless this job is already scoped to the single
        # machine being operated on
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            def wrapper(machine):
                # NOTE(review): this mutates self (parse_job, machines,
                # resultdir) — presumably each wrapper runs in a forked
                # subprocess so the parent job object is unaffected; confirm.
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                # multi-machine without parsing: just give each machine its
                # own resultdir and hostname keyval
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)
348
349
    # sentinel: pass as control_file_dir to request a throwaway temp dir
    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """
        Run the job's control file against self.machines.

        @param cleanup - run the cleanup control segment afterwards
        @param install_before - run the install control segment first
        @param install_after - run the install control segment afterwards
        @param collect_crashdumps - collect crashdumps when done
        @param namespace - extra names exposed to the control file; copied
                immediately, so the caller's dict (and this mutable default)
                is never modified
        @param control - control file text; defaults to self.control
        @param control_file_dir - where to write control file copies, or
                USE_TEMP_DIR for a temporary directory
        @param only_collect_crashinfo - skip the control file and only
                collect crash information from the machines
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # crashinfo (superset of crashdumps) is collected unless the control
        # file runs to completion without raising
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    # client-side control: run it via the client wrapper
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                self._execute_code(server_control_file, namespace)

            # no error occured, so we don't need to collect crashinfo
            collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    print 'Error %s removing dir %s' % (e,
                                                        temp_control_file_dir)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000445
446
mblighc86113b2009-04-28 18:32:51 +0000447 def set_test_tag(self, tag=''):
448 """Set tag to be added to test name of all following run_test steps."""
449 self.test_tag = tag
450
451
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        Returns True on success, False if the test failed with a
        TestBaseException (which has already been recorded); any other
        exception is re-raised.
        """

        # group is the test's containing package name; unused here
        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # explicit tag wins; otherwise fall back to the job-wide test_tag
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self.test_tag
        if tag:
            testname += '.' + str(tag)
        subdir = testname

        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                # test-level failure: record its own exit status
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                # unexpected failure: record as FAIL with a traceback
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            # re-raise non-test exceptions with the original traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000498
499
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END entries around the call and indents nested
        records with an extra tab while the group is running.

        Returns (result, exc_info) where exc_info is sys.exc_info() if a
        TestBaseException was raised, else None. Any other exception is
        converted into an END ABORT record and a JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # always restore the prefix before the END record is written
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # test-level failures carry their own END status
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            # anything else aborts the group entirely
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000525
526
527 def run_group(self, function, *args, **dargs):
528 """\
529 function:
530 subroutine to run
531 *args:
532 arguments for the function
533 """
534
535 name = function.__name__
536
537 # Allow the tag for the group to be specified.
538 tag = dargs.pop('tag', None)
539 if tag:
540 name = tag
541
jadmanskide292df2008-08-26 20:51:14 +0000542 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000543
544
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore the prefix so END FAIL is written at the group level
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # only query the kernel version after a successful reboot
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
572
573
jadmanskie432dd22009-01-30 15:04:51 +0000574 def run_control(self, path):
575 """Execute a control file found at path (relative to the autotest
576 path). Intended for executing a control file within a control file,
577 not for running the top-level job control file."""
578 path = os.path.join(self.autodir, path)
579 control_file = self._load_control_file(path)
580 self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)
581
582
jadmanskic09fc152008-10-15 17:56:59 +0000583 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000584 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000585 on_every_test)
586
587
588 def add_sysinfo_logfile(self, file, on_every_test=False):
589 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
590
591
592 def _add_sysinfo_loggable(self, loggable, on_every_test):
593 if on_every_test:
594 self.sysinfo.test_loggables.add(loggable)
595 else:
596 self.sysinfo.boot_loggables.add(loggable)
597
598
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed sucessfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self.record_prefix
        try:
            # warnings flushed alongside an END record are indented one
            # extra level so they appear inside the group being closed
            if status_code.startswith("END "):
                self.record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            # always restore the prefix, even if a warning write fails
            self.record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
646
647
    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read; timeout 0 makes this non-blocking
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning: "<timestamp>\t<type>\t<message>"
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid (its category is not currently
                # disabled), add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
685
686
jadmanski16a7ff72009-04-01 18:19:53 +0000687 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000688 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000689 self.record("INFO", None, None,
690 "disabling %s warnings" % warning_type,
691 {"warnings.disable": warning_type})
692 time.sleep(self.WARNING_DISABLE_DELAY)
jadmanskif37df842009-02-11 00:03:26 +0000693
694
jadmanski16a7ff72009-04-01 18:19:53 +0000695 def enable_warnings(self, warning_type):
696 time.sleep(self.WARNING_DISABLE_DELAY)
jadmanskif37df842009-02-11 00:03:26 +0000697 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000698 self.record("INFO", None, None,
699 "enabling %s warnings" % warning_type,
700 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000701
702
jadmanski779bd292009-03-19 17:33:33 +0000703 def get_status_log_path(self, subdir=None):
704 """Return the path to the job status log.
705
706 @param subdir - Optional paramter indicating that you want the path
707 to a subdirectory status log.
708
709 @returns The path where the status log should be.
710 """
mbligh210bae62009-04-01 18:33:13 +0000711 if self.resultdir:
712 if subdir:
713 return os.path.join(self.resultdir, subdir, "status.log")
714 else:
715 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000716 else:
mbligh210bae62009-04-01 18:33:13 +0000717 return None
jadmanski779bd292009-03-19 17:33:33 +0000718
719
    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        The list is stored pickled in self.uncollected_log_file; an exclusive
        flock is held for the whole read-modify-write cycle. A no-op when the
        job has no uncollected logs file (no resultdir).

        @param update_func - a function that updates the list of uncollected
                logs. Should take one parameter, the list to be updated.
        """
        if self.uncollected_log_file:
            log_file = open(self.uncollected_log_file, "r+")
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                # rewrite the file in place with the updated list
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                log_file.flush()
            finally:
                # unlock before closing so the lock is always released
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()
739
740
741 def add_client_log(self, hostname, remote_path, local_path):
742 """Adds a new set of client logs to the list of uncollected logs,
743 to allow for future log recovery.
744
745 @param host - the hostname of the machine holding the logs
746 @param remote_path - the directory on the remote machine holding logs
747 @param local_path - the local directory to copy the logs into
748 """
749 def update_func(logs_list):
750 logs_list.append((hostname, remote_path, local_path))
751 self._update_uncollected_logs_list(update_func)
752
753
754 def remove_client_log(self, hostname, remote_path, local_path):
755 """Removes a set of client logs from the list of uncollected logs,
756 to allow for future log recovery.
757
758 @param host - the hostname of the machine holding the logs
759 @param remote_path - the directory on the remote machine holding logs
760 @param local_path - the local directory to copy the logs into
761 """
762 def update_func(logs_list):
763 logs_list.remove((hostname, remote_path, local_path))
764 self._update_uncollected_logs_list(update_func)
765
766
jadmanski10646442008-08-13 14:05:21 +0000767 def _render_record(self, status_code, subdir, operation, status='',
768 epoch_time=None, record_prefix=None,
769 optional_fields=None):
770 """
771 Internal Function to generate a record to be written into a
772 status log. For use by server_job.* classes only.
773 """
774 if subdir:
775 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000776 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000777 substr = subdir
778 else:
779 substr = '----'
780
mbligh1b3b3762008-09-25 02:46:34 +0000781 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000782 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000783 if not operation:
784 operation = '----'
785 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000786 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000787 operation = operation.rstrip()
788 status = status.rstrip()
789 status = re.sub(r"\t", " ", status)
790 # Ensure any continuation lines are marked so we can
791 # detect them in the status file to ensure it is parsable.
792 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
793
794 if not optional_fields:
795 optional_fields = {}
796
797 # Generate timestamps for inclusion in the logs
798 if epoch_time is None:
799 epoch_time = int(time.time())
800 local_time = time.localtime(epoch_time)
801 optional_fields["timestamp"] = str(epoch_time)
802 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
803 local_time)
804
805 fields = [status_code, substr, operation]
806 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
807 fields.append(status)
808
809 if record_prefix is None:
810 record_prefix = self.record_prefix
811
812 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000813 return record_prefix + msg + '\n'
814
815
816 def _record_prerendered(self, msg):
817 """
818 Record a pre-rendered msg into the status logs. The only
819 change this makes to the message is to add on the local
820 indentation. Should not be called outside of server_job.*
821 classes. Unlike _record, this does not write the message
822 to standard output.
823 """
824 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000825 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000826 status_log = open(status_file, 'a')
827 for line in msg.splitlines():
828 line = self.record_prefix + line + '\n'
829 lines.append(line)
830 status_log.write(line)
831 status_log.close()
832 self.__parse_status(lines)
833
834
    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
            namespace: The namespace dictionary to fill in.
            protect: Boolean. If True (the default) any operation that would
                clobber an existing entry in namespace will cause an error.
        Raises:
            error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name. If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            # passing names as the fromlist argument; with an empty fromlist
            # __import__ returns the top-level package, not the leaf module
            module = __import__(module_name, {}, {}, names)

            # No names supplied? Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                # walk down from the top-level package to the leaf submodule
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                # honor the module's declared public API when it has one
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job. Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
917
918
919 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000920 """
921 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000922
923 Unless protect_namespace is explicitly set to False, the dict will not
924 be modified.
925
926 Args:
927 code_file: The filename of the control file to execute.
928 namespace: A dict containing names to make available during execution.
929 protect: Boolean. If True (the default) a copy of the namespace dict
930 is used during execution to prevent the code from modifying its
931 contents outside of this function. If False the raw dict is
932 passed in and modifications will be allowed.
933 """
934 if protect:
935 namespace = namespace.copy()
936 self._fill_server_control_namespace(namespace, protect=protect)
937 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +0000938 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +0000939 machines_text = '\n'.join(self.machines) + '\n'
940 # Only rewrite the file if it does not match our machine list.
941 try:
942 machines_f = open(MACHINES_FILENAME, 'r')
943 existing_machines_text = machines_f.read()
944 machines_f.close()
945 except EnvironmentError:
946 existing_machines_text = None
947 if machines_text != existing_machines_text:
948 utils.open_write_close(MACHINES_FILENAME, machines_text)
949 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +0000950
951
952 def _record(self, status_code, subdir, operation, status='',
953 epoch_time=None, optional_fields=None):
954 """
955 Actual function for recording a single line into the status
956 logs. Should never be called directly, only by job.record as
957 this would bypass the console monitor logging.
958 """
959
mbligh2b92b862008-11-22 13:25:32 +0000960 msg = self._render_record(status_code, subdir, operation, status,
961 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +0000962
jadmanski779bd292009-03-19 17:33:33 +0000963 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000964 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +0000965 if status_file:
966 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +0000967 if subdir:
jadmanski779bd292009-03-19 17:33:33 +0000968 sub_status_file = self.get_status_log_path(subdir)
969 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +0000970 self.__parse_status(msg.splitlines())
971
972
973 def __parse_status(self, new_lines):
974 if not self.using_parser:
975 return
976 new_tests = self.parser.process_lines(new_lines)
977 for test in new_tests:
978 self.__insert_test(test)
979
980
981 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +0000982 """
983 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +0000984 database. This method will not raise an exception, even if an
985 error occurs during the insert, to avoid failing a test
986 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +0000987 self.num_tests_run += 1
988 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
989 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +0000990 try:
991 self.results_db.insert_test(self.job_model, test)
992 except Exception:
993 msg = ("WARNING: An unexpected error occured while "
994 "inserting test results into the database. "
995 "Ignoring error.\n" + traceback.format_exc())
996 print >> sys.stderr, msg
997
mblighcaa62c22008-04-07 21:51:17 +0000998
# Look up a site-specific server_job specialization if the optional
# site_server_job module is present; import_site_class appears to fall back
# to base_server_job (defined earlier in this module) otherwise -- TODO
# confirm against utils.import_site_class.
site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)


class server_job(site_server_job):
    """Concrete server-side job class: base_server_job plus any
    site-specific customizations picked up above."""
    pass
jadmanskif37df842009-02-11 00:03:26 +00001005
1006
class warning_manager(object):
    """Controls warning logs by tracking, per warning type, the time
    intervals during which that type of warning is suppressed."""

    def __init__(self):
        # maps warning type -> list of (start, end) disabled intervals;
        # end is None while the latest interval is still open
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occurred and its
        type) is a valid warning. A warning is considered "invalid" if this
        type of warning was marked as "disabled" at the time it occurred."""
        for begin, finish in self.disabled_warnings.get(warning_type, []):
            suppressed = begin <= timestamp and (finish is None or
                                                 timestamp < finish)
            if suppressed:
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        currently_open = intervals and intervals[-1][1] is None
        if not currently_open:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            begin = intervals[-1][0]
            intervals[-1] = (begin, int(current_time_func()))