blob: c1930185bc41dc0a3b09a64a1ce03ad5737545f3 [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
jadmanski0cb250f2010-01-05 18:42:15 +000010import traceback, shutil, warnings, fcntl, pickle, logging, itertools
showard75cdfee2009-06-10 17:40:41 +000011from autotest_lib.client.bin import sysinfo
mbligh0d0f67d2009-11-06 03:15:03 +000012from autotest_lib.client.common_lib import base_job
mbligh09108442008-10-15 16:27:38 +000013from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000014from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000015from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000016from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000017
18
mbligh084bc172008-10-18 14:02:45 +000019def _control_segment_path(name):
20 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000021 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000022 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000023
24
# Filenames used inside a job's results directory.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Control segment files run by the server job at the various job stages.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000037
38
mbligh062ed152009-01-13 00:57:14 +000039# by default provide a stub that generates no site data
40def _get_site_job_data_dummy(job):
41 return {}
42
43
# Load up site-specific code for generating site-specific job data; falls
# back to the dummy implementation when no site module is installed.
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000048
49
class base_server_job(base_job.base_job):
    """The server-side concrete implementation of base_job.

    Optional properties provided by this implementation:
        serverdir
        conmuxdir

        num_tests_run
        num_tests_failed

        warning_manager
        warning_loggers
    """

    # Version of the status log format written by this job; recorded in the
    # job keyvals and used to construct the matching TKO status parser.
    _STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000065
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        """
        super(base_server_job, self).__init__(resultdir=resultdir)

        # NOTE(review): 'path' is computed but never used in this method.
        path = os.path.dirname(__file__)
        self.control = control
        self._uncollected_log_file = os.path.join(self.resultdir,
                                                  'uncollected_logs')
        debugdir = os.path.join(self.resultdir, 'debug')
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        if user:
            self.user = user
        else:
            self.user = getpass.getuser()

        self._args = args
        self.machines = machines
        self._client = client
        # indent prefix for status log records (tabs added per group level)
        self._record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so all output lands in the job logs
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self._STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name

        # only write these keyvals out on the first job in a resultdir
        if 'job_started' not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        # continuous parsing is only supported for single-machine jobs
        self._parse_job = parse_job
        self._using_parser = (self._parse_job and len(machines) == 1)
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        # should tell us if this job results are inside a machine named
        # directory
        self.in_machine_dir = False

        self._register_subcommand_hooks()
        self._test_tag_prefix = None

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None
154
155
156 @classmethod
157 def _find_base_directories(cls):
158 """
159 Determine locations of autodir, clientdir and serverdir. Assumes
160 that this file is located within serverdir and uses __file__ along
161 with relative paths to resolve the location.
162 """
163 serverdir = os.path.abspath(os.path.dirname(__file__))
164 autodir = os.path.normpath(os.path.join(serverdir, '..'))
165 clientdir = os.path.join(autodir, 'client')
166 return autodir, clientdir, serverdir
167
168
169 def _find_resultdir(self, resultdir):
170 """
171 Determine the location of resultdir. For server jobs we expect one to
172 always be explicitly passed in to __init__, so just return that.
173 """
174 if resultdir:
175 return os.path.normpath(resultdir)
176 else:
177 return None
178
jadmanski550fdc22008-11-20 16:32:08 +0000179
jadmanskie432dd22009-01-30 15:04:51 +0000180 @staticmethod
181 def _load_control_file(path):
182 f = open(path)
183 try:
184 control_file = f.read()
185 finally:
186 f.close()
187 return re.sub('\r', '', control_file)
188
189
jadmanski550fdc22008-11-20 16:32:08 +0000190 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000191 """
192 Register some hooks into the subcommand modules that allow us
193 to properly clean up self.hosts created in forked subprocesses.
194 """
jadmanski550fdc22008-11-20 16:32:08 +0000195 def on_fork(cmd):
196 self._existing_hosts_on_fork = set(self.hosts)
197 def on_join(cmd):
198 new_hosts = self.hosts - self._existing_hosts_on_fork
199 for host in new_hosts:
200 host.close()
201 subcommand.subcommand.register_fork_hook(on_fork)
202 subcommand.subcommand.register_join_hook(on_join)
203
jadmanski10646442008-08-13 14:05:21 +0000204
    def init_parser(self):
        """
        Start the continuous parsing of self.resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        No-op unless continuous parsing was enabled in __init__
        (self._using_parser).
        """
        if not self._using_parser:
            return
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(self.resultdir, '.parse.log')
        # buffering=0: unbuffered, so debug output survives a crash
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self._STATUS_VERSION)
        self.job_model = self.parser.make_job(self.resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self._parse_job)
        if job_idx is None:
            self.results_db.insert_job(self._parse_job, self.job_model)
        else:
            # reuse the existing job row and its machine association
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
231
232
233 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000234 """
235 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000236 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000237 remaining test results to the results db)
238 """
mbligh0d0f67d2009-11-06 03:15:03 +0000239 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +0000240 return
241 final_tests = self.parser.end()
242 for test in final_tests:
243 self.__insert_test(test)
mbligh0d0f67d2009-11-06 03:15:03 +0000244 self._using_parser = False
jadmanski10646442008-08-13 14:05:21 +0000245
246
    def verify(self):
        """Run the verify control segment against self.machines.

        @raises AutoservError: if no machines were specified.
        Any failure from the control segment is recorded as an ABORT in
        the status log and then re-raised.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self._ssh_user,
                         'ssh_port' : self._ssh_port,
                         'ssh_pass' : self._ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
262
263
    def repair(self, host_protection):
        """Run the repair control segment against self.machines.

        @param host_protection: protection level passed through to the
                repair control segment as 'protection_level'.

        @raises AutoservError: if no machines were specified.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
                     'ssh_pass': self._ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000275
276
277 def precheck(self):
278 """
279 perform any additional checks in derived classes.
280 """
281 pass
282
283
284 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000285 """
286 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000287 """
288 pass
289
290
291 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000292 """
293 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000294 """
295 pass
296
297
298 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000299 """
300 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000301 """
302 return False
303
304
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        Three cases:
          * parsing enabled + forking + log: the wrapper re-targets the job
            (parse tag, machine list, execution context, parser) at the one
            machine before calling function.
          * multiple machines + log: the wrapper only switches execution
            context and writes per-machine keyvals.
          * otherwise: function is returned unwrapped.
        """
        # we fork unless it is a single-machine job on self.machines
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self._parse_job and is_forking and log:
            def wrapper(machine):
                # NOTE(review): mutating self here assumes the wrapper runs
                # in a forked subprocess (via subcommand), so the parent
                # job's state is not clobbered -- confirm with callers.
                self._parse_job += "/" + machine
                self._using_parser = True
                self.machines = [machine]
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser()
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                machine_data = {'hostname' : machine,
                                'status_version' : str(self._STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        return wrapper
334
335
336 def parallel_simple(self, function, machines, log=True, timeout=None,
337 return_results=False):
338 """
339 Run 'function' using parallel_simple, with an extra wrapper to handle
340 the necessary setup for continuous parsing, if possible. If continuous
341 parsing is already properly initialized then this should just work.
342
343 @param function: A callable to run in parallel given each machine.
344 @param machines: A list of machine names to be passed one per subcommand
345 invocation of function.
346 @param log: If True, output will be written to output in a subdirectory
347 named after each machine.
348 @param timeout: Seconds after which the function call should timeout.
349 @param return_results: If True instead of an AutoServError being raised
350 on any error a list of the results|exceptions from the function
351 called on each arg is returned. [default: False]
352
353 @raises error.AutotestError: If any of the functions failed.
354 """
355 wrapper = self._make_parallel_wrapper(function, machines, log)
356 return subcommand.parallel_simple(wrapper, machines,
357 log=log, timeout=timeout,
358 return_results=return_results)
359
360
361 def parallel_on_machines(self, function, machines, timeout=None):
362 """
showardcd5fac42009-07-06 20:19:43 +0000363 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000364 @param machines: A list of machines to call function(machine) on.
365 @param timeout: Seconds after which the function call should timeout.
366
367 @returns A list of machines on which function(machine) returned
368 without raising an exception.
369 """
showardcd5fac42009-07-06 20:19:43 +0000370 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000371 return_results=True)
372 success_machines = []
373 for result, machine in itertools.izip(results, machines):
374 if not isinstance(result, Exception):
375 success_machines.append(machine)
376 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000377
378
    # sentinel value: pass as control_file_dir to request a temp directory
    _USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """Execute the job's control file, plus the surrounding machinery.

        @param cleanup: run the cleanup control segment afterwards.
        @param install_before/install_after: run the install control segment
                before/after the control file.
        @param collect_crashdumps: collect crashdumps after the run.
        @param namespace: extra names for the control file's namespace.
                NOTE(review): mutable default is safe only because it is
                copied immediately below; do not remove the copy.
        @param control: control file text; defaults to loading self.control.
        @param control_file_dir: where to write the control file copies, or
                _USE_TEMP_DIR for a temporary directory.
        @param only_collect_crashinfo: skip the control file and only do
                crashinfo collection for a previous run.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self._uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self._uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            if self.control is None:
                control = ''
            else:
                control = self._load_control_file(self.control)
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self._args
        namespace['job'] = self
        namespace['ssh_user'] = self._ssh_user
        namespace['ssh_port'] = self._ssh_port
        namespace['ssh_pass'] = self._ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # assume failure until the control file completes cleanly
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self._USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self._client:
                    # client-side control: wrap it with the client wrapper
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
            except:
                try:
                    logging.exception(
                        'Exception escaped control file, job aborting:')
                except:
                    pass # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self._uncollected_log_file:
                os.remove(self._uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000489
490
mbligh7eacbc22009-07-28 23:13:56 +0000491 def set_test_tag_prefix(self, tag=''):
492 """
493 Set tag to be prepended (separated by a '.') to test name of all
494 following run_test steps.
495 """
496 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000497
498
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        Returns True on success, False when the test raised a
        TestBaseException; any other exception is re-raised.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # combine the explicit tag with the job-wide test tag prefix
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self._test_tag_prefix
        elif self._test_tag_prefix:
            tag = '%s.%s' % (self._test_tag_prefix, tag)

        if tag:
            testname += '.' + str(tag)
        subdir = testname

        # refuse to clobber results from a previous run of the same test+tag
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # runs inside a status-log group; records the test outcome
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            # test-level failures are reported via the return value
            return False
        elif exc_info:
            # re-raise with the original traceback (Python 2 three-arg raise)
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000548
549
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END status lines around the call and returns a
        (result, exc_info) tuple. exc_info is None unless function raised
        a TestBaseException, which is captured rather than propagated;
        any other exception aborts the group and is re-raised as JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self._record_prefix
        try:
            self.record('START', subdir, name)
            self._record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # always restore the log indent level, pass or fail
                self._record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # expected test failure: record it and hand exc_info back
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            # unexpected failure: abort the group
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000575
576
577 def run_group(self, function, *args, **dargs):
578 """\
579 function:
580 subroutine to run
581 *args:
582 arguments for the function
583 """
584
585 name = function.__name__
586
587 # Allow the tag for the group to be specified.
588 tag = dargs.pop('tag', None)
589 if tag:
590 name = tag
591
jadmanskide292df2008-08-26 20:51:14 +0000592 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000593
594
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self._record_prefix
        try:
            self.record('START', None, 'reboot')
            self._record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore the indent before recording the failure line
            self._record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # only query the kernel version after a successful reboot
            kernel = get_kernel_func()
            self._record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
622
623
jadmanskie432dd22009-01-30 15:04:51 +0000624 def run_control(self, path):
625 """Execute a control file found at path (relative to the autotest
626 path). Intended for executing a control file within a control file,
627 not for running the top-level job control file."""
628 path = os.path.join(self.autodir, path)
629 control_file = self._load_control_file(path)
mbligh0d0f67d2009-11-06 03:15:03 +0000630 self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
jadmanskie432dd22009-01-30 15:04:51 +0000631
632
jadmanskic09fc152008-10-15 17:56:59 +0000633 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000634 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000635 on_every_test)
636
637
638 def add_sysinfo_logfile(self, file, on_every_test=False):
639 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
640
641
642 def _add_sysinfo_loggable(self, loggable, on_every_test):
643 if on_every_test:
644 self.sysinfo.test_loggables.add(loggable)
645 else:
646 self.sysinfo.boot_loggables.add(loggable)
647
648
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed sucessfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self._record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self._record_prefix
        try:
            # END lines close a group, so warnings that arrived during the
            # group are logged one indent level deeper
            if status_code.startswith("END "):
                self._record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            # restore the indent even if writing a warning line fails
            self._record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
696
697
    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Each line read from a logger is expected to have the form
        "<timestamp>\t<msgtype>\t<message>".

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read; the 0 timeout makes this a
            # non-blocking poll
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid (i.e. its type was not disabled at
                # the time it was emitted), add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
735
736
jadmanski16a7ff72009-04-01 18:19:53 +0000737 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000738 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000739 self.record("INFO", None, None,
740 "disabling %s warnings" % warning_type,
741 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000742
743
jadmanski16a7ff72009-04-01 18:19:53 +0000744 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000745 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000746 self.record("INFO", None, None,
747 "enabling %s warnings" % warning_type,
748 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000749
750
jadmanski779bd292009-03-19 17:33:33 +0000751 def get_status_log_path(self, subdir=None):
752 """Return the path to the job status log.
753
754 @param subdir - Optional paramter indicating that you want the path
755 to a subdirectory status log.
756
757 @returns The path where the status log should be.
758 """
mbligh210bae62009-04-01 18:33:13 +0000759 if self.resultdir:
760 if subdir:
761 return os.path.join(self.resultdir, subdir, "status.log")
762 else:
763 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000764 else:
mbligh210bae62009-04-01 18:33:13 +0000765 return None
jadmanski779bd292009-03-19 17:33:33 +0000766
767
    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        @param update_func - a function that updates the list of uncollected
                logs. Should take one parameter, the list to be updated.
        """
        # no-op when this job was started without an uncollected-logs file
        if self._uncollected_log_file:
            log_file = open(self._uncollected_log_file, "r+")
            # hold an exclusive lock for the whole read-modify-write cycle
            # so concurrent processes cannot interleave their updates
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                # rewrite the file in place with the updated list
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                # flush before unlocking so other processes see the update
                log_file.flush()
            finally:
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()
787
788
789 def add_client_log(self, hostname, remote_path, local_path):
790 """Adds a new set of client logs to the list of uncollected logs,
791 to allow for future log recovery.
792
793 @param host - the hostname of the machine holding the logs
794 @param remote_path - the directory on the remote machine holding logs
795 @param local_path - the local directory to copy the logs into
796 """
797 def update_func(logs_list):
798 logs_list.append((hostname, remote_path, local_path))
799 self._update_uncollected_logs_list(update_func)
800
801
802 def remove_client_log(self, hostname, remote_path, local_path):
803 """Removes a set of client logs from the list of uncollected logs,
804 to allow for future log recovery.
805
806 @param host - the hostname of the machine holding the logs
807 @param remote_path - the directory on the remote machine holding logs
808 @param local_path - the local directory to copy the logs into
809 """
810 def update_func(logs_list):
811 logs_list.remove((hostname, remote_path, local_path))
812 self._update_uncollected_logs_list(update_func)
813
814
mbligh0d0f67d2009-11-06 03:15:03 +0000815 def get_client_logs(self):
816 """Retrieves the list of uncollected logs, if it exists.
817
818 @returns A list of (host, remote_path, local_path) tuples. Returns
819 an empty list if no uncollected logs file exists.
820 """
821 log_exists = (self._uncollected_log_file and
822 os.path.exists(self._uncollected_log_file))
823 if log_exists:
824 return pickle.load(open(self._uncollected_log_file))
825 else:
826 return []
827
828
jadmanski10646442008-08-13 14:05:21 +0000829 def _render_record(self, status_code, subdir, operation, status='',
830 epoch_time=None, record_prefix=None,
831 optional_fields=None):
832 """
833 Internal Function to generate a record to be written into a
834 status log. For use by server_job.* classes only.
835 """
836 if subdir:
837 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000838 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000839 substr = subdir
840 else:
841 substr = '----'
842
mbligh1b3b3762008-09-25 02:46:34 +0000843 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000844 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000845 if not operation:
846 operation = '----'
847 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000848 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000849 operation = operation.rstrip()
850 status = status.rstrip()
851 status = re.sub(r"\t", " ", status)
852 # Ensure any continuation lines are marked so we can
853 # detect them in the status file to ensure it is parsable.
mbligh0d0f67d2009-11-06 03:15:03 +0000854 status = re.sub(r"\n", "\n" + self._record_prefix + " ", status)
jadmanski10646442008-08-13 14:05:21 +0000855
856 if not optional_fields:
857 optional_fields = {}
858
859 # Generate timestamps for inclusion in the logs
860 if epoch_time is None:
861 epoch_time = int(time.time())
862 local_time = time.localtime(epoch_time)
863 optional_fields["timestamp"] = str(epoch_time)
864 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
865 local_time)
866
867 fields = [status_code, substr, operation]
868 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
869 fields.append(status)
870
871 if record_prefix is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000872 record_prefix = self._record_prefix
jadmanski10646442008-08-13 14:05:21 +0000873
874 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000875 return record_prefix + msg + '\n'
876
877
878 def _record_prerendered(self, msg):
879 """
880 Record a pre-rendered msg into the status logs. The only
881 change this makes to the message is to add on the local
882 indentation. Should not be called outside of server_job.*
883 classes. Unlike _record, this does not write the message
884 to standard output.
885 """
886 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000887 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000888 status_log = open(status_file, 'a')
889 for line in msg.splitlines():
mbligh0d0f67d2009-11-06 03:15:03 +0000890 line = self._record_prefix + line + '\n'
jadmanski10646442008-08-13 14:05:21 +0000891 lines.append(line)
892 status_log.write(line)
893 status_log.close()
894 self.__parse_status(lines)
895
896
    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean. If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name. If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            module = __import__(module_name, {}, {}, names)

            # No names supplied? Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            # __import__ with an empty fromlist returns the top-level
            # package, so walk down to the leaf submodule by hand.
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys, logging=logging)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job. Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
979
980
    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during execution.
          protect: Boolean. If True (the default) a copy of the namespace dict
              is used during execution to prevent the code from modifying its
              contents outside of this function. If False the raw dict is
              passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        # NOTE(review): the MACHINES_FILENAME (.machines) file is written in
        # the current directory for multi-machine jobs; presumably something
        # outside this method reads it -- confirm against callers/tools.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                # file missing or unreadable; force a fresh write below
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        # run the control file with namespace as both globals and locals
        execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001012
1013
1014 def _record(self, status_code, subdir, operation, status='',
1015 epoch_time=None, optional_fields=None):
1016 """
1017 Actual function for recording a single line into the status
1018 logs. Should never be called directly, only by job.record as
1019 this would bypass the console monitor logging.
1020 """
1021
mbligh2b92b862008-11-22 13:25:32 +00001022 msg = self._render_record(status_code, subdir, operation, status,
1023 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001024
jadmanski779bd292009-03-19 17:33:33 +00001025 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001026 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001027 if status_file:
1028 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001029 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001030 sub_status_file = self.get_status_log_path(subdir)
1031 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001032 self.__parse_status(msg.splitlines())
1033
1034
1035 def __parse_status(self, new_lines):
mbligh0d0f67d2009-11-06 03:15:03 +00001036 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +00001037 return
1038 new_tests = self.parser.process_lines(new_lines)
1039 for test in new_tests:
1040 self.__insert_test(test)
1041
1042
1043 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001044 """
1045 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001046 database. This method will not raise an exception, even if an
1047 error occurs during the insert, to avoid failing a test
1048 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001049 self.num_tests_run += 1
1050 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1051 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001052 try:
1053 self.results_db.insert_test(self.job_model, test)
1054 except Exception:
1055 msg = ("WARNING: An unexpected error occured while "
1056 "inserting test results into the database. "
1057 "Ignoring error.\n" + traceback.format_exc())
1058 print >> sys.stderr, msg
1059
mblighcaa62c22008-04-07 21:51:17 +00001060
# Pick up a site-specific server_job extension if one is provided by the
# installation; base_server_job is passed as the fallback when no
# site_server_job class exists.
site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001064
class server_job(site_server_job):
    """The server job class used by autoserv; all behavior is inherited
    from site_server_job (which may simply be base_server_job when no
    site-specific class is defined)."""
    pass
jadmanskif37df842009-02-11 00:03:26 +00001067
1068
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings.

    Disabled periods are kept per warning type as a list of (start, end)
    integer epoch-time intervals; an end of None means the type is still
    disabled."""
    def __init__(self):
        # a map of warning types to a list of disabled time intervals
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occured and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occured."""
        for start, end in self.disabled_warnings.get(warning_type, []):
            in_interval = timestamp >= start and (end is None or
                                                  timestamp < end)
            if in_interval:
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        currently_disabled = bool(intervals) and intervals[-1][1] is None
        if not currently_disabled:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        currently_disabled = bool(intervals) and intervals[-1][1] is None
        if currently_disabled:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))