blob: 2b8ef6ebc63f5c4653b99a539873a3d6cc6dce36 [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh0d0f67d2009-11-06 03:15:03 +000013from autotest_lib.client.common_lib import base_job
mbligh09108442008-10-15 16:27:38 +000014from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000015from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000016from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000017from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000018
19
mbligh084bc172008-10-18 14:02:45 +000020def _control_segment_path(name):
21 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000022 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000023 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000024
25
# Names of the control files written into a job's working directory.
CLIENT_CONTROL_FILENAME = 'control'        # client-side control file
SERVER_CONTROL_FILENAME = 'control.srv'    # server-side control file
MACHINES_FILENAME = '.machines'            # list of machines the job used

# Prebuilt control segments implementing the standard server-side
# operations; each lives in the control_segments/ dir next to this file.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000038
39
mbligh062ed152009-01-13 00:57:14 +000040# by default provide a stub that generates no site data
41def _get_site_job_data_dummy(job):
42 return {}
43
44
# load up site-specific code for generating site-specific job data; falls
# back to the dummy implementation when no site module is installed
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000049
50
class base_server_job(base_job.base_job):
    """The server-side concrete implementation of base_job.

    Optional properties provided by this implementation:
        serverdir
        conmuxdir

        num_tests_run
        num_tests_failed

        warning_manager
        warning_loggers
    """

    # Version of the status log format this job emits; written to the job
    # keyvals and used to select the matching TKO status parser.
    _STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000066
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param machines: List of machine hostnames this job runs on.
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username. [root]
        @param ssh_port: The SSH port number. [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler. [optional]
        """
        # base_job.__init__ resolves resultdir via _find_resultdir below
        super(base_server_job, self).__init__(resultdir=resultdir)

        # NOTE(review): 'path' appears unused after this assignment
        path = os.path.dirname(__file__)
        self.control = control
        # tracks logs on remote machines not yet copied back (pickled list)
        self._uncollected_log_file = os.path.join(self.resultdir,
                                                  'uncollected_logs')
        debugdir = os.path.join(self.resultdir, 'debug')
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        if user:
            self.user = user
        else:
            self.user = getpass.getuser()

        self._args = args
        self.machines = machines
        self._client = client
        # indentation prefix prepended to status log lines for grouping
        self._record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so output is captured into the job logs
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self._STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name

        # only write these keyvals out on the first job in a resultdir
        if 'job_started' not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        # continuous TKO parsing is only enabled for single-machine jobs;
        # multi-machine jobs re-enable it per machine in the parallel wrapper
        self._parse_job = parse_job
        if self._parse_job and len(machines) == 1:
            self._using_parser = True
            self.init_parser(self.resultdir)
        else:
            self._using_parser = False
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        # should tell us if this job results are inside a machine named
        # directory
        self.in_machine_dir = False

        self._register_subcommand_hooks()
        self._test_tag_prefix = None

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None
159
160
161 @classmethod
162 def _find_base_directories(cls):
163 """
164 Determine locations of autodir, clientdir and serverdir. Assumes
165 that this file is located within serverdir and uses __file__ along
166 with relative paths to resolve the location.
167 """
168 serverdir = os.path.abspath(os.path.dirname(__file__))
169 autodir = os.path.normpath(os.path.join(serverdir, '..'))
170 clientdir = os.path.join(autodir, 'client')
171 return autodir, clientdir, serverdir
172
173
174 def _find_resultdir(self, resultdir):
175 """
176 Determine the location of resultdir. For server jobs we expect one to
177 always be explicitly passed in to __init__, so just return that.
178 """
179 if resultdir:
180 return os.path.normpath(resultdir)
181 else:
182 return None
183
jadmanski550fdc22008-11-20 16:32:08 +0000184
jadmanskie432dd22009-01-30 15:04:51 +0000185 @staticmethod
186 def _load_control_file(path):
187 f = open(path)
188 try:
189 control_file = f.read()
190 finally:
191 f.close()
192 return re.sub('\r', '', control_file)
193
194
jadmanski550fdc22008-11-20 16:32:08 +0000195 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000196 """
197 Register some hooks into the subcommand modules that allow us
198 to properly clean up self.hosts created in forked subprocesses.
199 """
jadmanski550fdc22008-11-20 16:32:08 +0000200 def on_fork(cmd):
201 self._existing_hosts_on_fork = set(self.hosts)
202 def on_join(cmd):
203 new_hosts = self.hosts - self._existing_hosts_on_fork
204 for host in new_hosts:
205 host.close()
206 subcommand.subcommand.register_fork_hook(on_fork)
207 subcommand.subcommand.register_join_hook(on_join)
208
jadmanski10646442008-08-13 14:05:21 +0000209
    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        @param resultdir: Directory whose status logs should be parsed.
        """
        # redirect parser debugging to .parse.log (opened unbuffered)
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self._STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self._parse_job)
        if job_idx is None:
            self.results_db.insert_job(self._parse_job, self.job_model)
        else:
            # job already present: reuse its indexes on our job model
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
234
235
236 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000237 """
238 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000239 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000240 remaining test results to the results db)
241 """
mbligh0d0f67d2009-11-06 03:15:03 +0000242 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +0000243 return
244 final_tests = self.parser.end()
245 for test in final_tests:
246 self.__insert_test(test)
mbligh0d0f67d2009-11-06 03:15:03 +0000247 self._using_parser = False
jadmanski10646442008-08-13 14:05:21 +0000248
249
250 def verify(self):
251 if not self.machines:
mbligh084bc172008-10-18 14:02:45 +0000252 raise error.AutoservError('No machines specified to verify')
mbligh0fce4112008-11-27 00:37:17 +0000253 if self.resultdir:
254 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000255 try:
jadmanskicdd0c402008-09-19 21:21:31 +0000256 namespace = {'machines' : self.machines, 'job' : self,
mbligh0d0f67d2009-11-06 03:15:03 +0000257 'ssh_user' : self._ssh_user,
258 'ssh_port' : self._ssh_port,
259 'ssh_pass' : self._ssh_pass}
mbligh084bc172008-10-18 14:02:45 +0000260 self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000261 except Exception, e:
mbligh2b92b862008-11-22 13:25:32 +0000262 msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
jadmanski10646442008-08-13 14:05:21 +0000263 self.record('ABORT', None, None, msg)
264 raise
265
266
267 def repair(self, host_protection):
268 if not self.machines:
269 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000270 if self.resultdir:
271 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000272 namespace = {'machines': self.machines, 'job': self,
mbligh0d0f67d2009-11-06 03:15:03 +0000273 'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
274 'ssh_pass': self._ssh_pass,
jadmanski10646442008-08-13 14:05:21 +0000275 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000276
mbligh0931b0a2009-04-08 17:44:48 +0000277 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000278
279
    def precheck(self):
        """
        perform any additional checks in derived classes.
        """
        # no-op hook; subclasses may override
        pass


    def enable_external_logging(self):
        """
        Start or restart external logging mechanism.
        """
        # no-op hook; subclasses with external logging override this
        pass


    def disable_external_logging(self):
        """
        Pause or stop external logging mechanism.
        """
        # no-op hook; subclasses with external logging override this
        pass


    def use_external_logging(self):
        """
        Return True if external logging should be used.
        """
        # the base implementation never uses external logging
        return False
306
307
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        Depending on whether the call will fork and whether per-machine
        logging is wanted, the returned wrapper may set up a per-machine
        execution context and parser before invoking function.

        @param function: The per-machine callable being wrapped.
        @param machines: The list of machines parallel_simple will use.
        @param log: Whether per-machine result directories should be set up.

        @returns A callable taking a single machine argument.
        """
        # forking happens unless this is the single-machine, same-machines case
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self._parse_job and is_forking and log:
            # forked + parsing: each child runs in its own execution context
            # with its own parser; the mutations below only affect the child
            def wrapper(machine):
                self._parse_job += "/" + machine
                self._using_parser = True
                self.machines = [machine]
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            # forked, no parsing: still give each machine its own resultdir
            def wrapper(machine):
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                machine_data = {'hostname' : machine,
                                'status_version' : str(self._STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            # single machine in-process: no extra setup needed
            wrapper = function
        return wrapper
337
338
339 def parallel_simple(self, function, machines, log=True, timeout=None,
340 return_results=False):
341 """
342 Run 'function' using parallel_simple, with an extra wrapper to handle
343 the necessary setup for continuous parsing, if possible. If continuous
344 parsing is already properly initialized then this should just work.
345
346 @param function: A callable to run in parallel given each machine.
347 @param machines: A list of machine names to be passed one per subcommand
348 invocation of function.
349 @param log: If True, output will be written to output in a subdirectory
350 named after each machine.
351 @param timeout: Seconds after which the function call should timeout.
352 @param return_results: If True instead of an AutoServError being raised
353 on any error a list of the results|exceptions from the function
354 called on each arg is returned. [default: False]
355
356 @raises error.AutotestError: If any of the functions failed.
357 """
358 wrapper = self._make_parallel_wrapper(function, machines, log)
359 return subcommand.parallel_simple(wrapper, machines,
360 log=log, timeout=timeout,
361 return_results=return_results)
362
363
364 def parallel_on_machines(self, function, machines, timeout=None):
365 """
showardcd5fac42009-07-06 20:19:43 +0000366 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000367 @param machines: A list of machines to call function(machine) on.
368 @param timeout: Seconds after which the function call should timeout.
369
370 @returns A list of machines on which function(machine) returned
371 without raising an exception.
372 """
showardcd5fac42009-07-06 20:19:43 +0000373 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000374 return_results=True)
375 success_machines = []
376 for result, machine in itertools.izip(results, machines):
377 if not isinstance(result, Exception):
378 success_machines.append(machine)
379 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000380
381
    # sentinel: pass as control_file_dir to request a temporary directory
    _USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """Execute this job's control file, with optional install/cleanup
        phases and crash log collection.

        @param cleanup: Run the cleanup control segment afterwards.
        @param install_before: Run the install control segment first.
        @param install_after: Run the install control segment afterwards.
        @param collect_crashdumps: Collect crashdumps even on success.
        @param namespace: Extra names for the control file's namespace.
                (mutable default is safe here: it is copied before use)
        @param control: Control file text; defaults to loading self.control.
        @param control_file_dir: Where to write control file copies, or
                _USE_TEMP_DIR for a throwaway temp dir.
        @param only_collect_crashinfo: Skip the control file and only
                collect crash information from a previous run.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self._uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self._uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            if self.control is None:
                control = ''
            else:
                control = self._load_control_file(self.control)
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self._args
        namespace['job'] = self
        namespace['ssh_user'] = self._ssh_user
        namespace['ssh_port'] = self._ssh_port
        namespace['ssh_pass'] = self._ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    # crashinfo collection happens in the finally block below
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self._USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self._client:
                    # client control file runs under the client wrapper segment
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
            except:
                try:
                    logging.exception(
                        'Exception escaped control file, job aborting:')
                except:
                    pass # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self._uncollected_log_file:
                os.remove(self._uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000492
493
mbligh7eacbc22009-07-28 23:13:56 +0000494 def set_test_tag_prefix(self, tag=''):
495 """
496 Set tag to be prepended (separated by a '.') to test name of all
497 following run_test steps.
498 """
499 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000500
501
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        @returns True on success, False if the test raised a
                TestBaseException; other exceptions are re-raised.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # combine the explicit tag with any job-wide test tag prefix
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self._test_tag_prefix
        elif self._test_tag_prefix:
            tag = '%s.%s' % (self._test_tag_prefix, tag)

        if tag:
            testname += '.' + str(tag)
        subdir = testname

        # refuse to clobber results from a previous run of the same test+tag
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # run the test, recording its outcome in the status log
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            # a test failure is reported as False, not re-raised
            return False
        elif exc_info:
            # re-raise non-test exceptions with their original traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000551
552
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END entries around the call and indents nested
        records while the group is running.

        @returns A (result, exc_info) tuple; exc_info is None unless the
                function raised a TestBaseException.
        """
        result, exc_info = None, None
        old_record_prefix = self._record_prefix
        try:
            self.record('START', subdir, name)
            # indent all records emitted inside the group
            self._record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                self._record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # test-level failures: record and return the exception info
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            # everything else aborts the group and becomes a JobError
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000578
579
580 def run_group(self, function, *args, **dargs):
581 """\
582 function:
583 subroutine to run
584 *args:
585 arguments for the function
586 """
587
588 name = function.__name__
589
590 # Allow the tag for the group to be specified.
591 tag = dargs.pop('tag', None)
592 if tag:
593 name = tag
594
jadmanskide292df2008-08-26 20:51:14 +0000595 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000596
597
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self._record_prefix
        try:
            self.record('START', None, 'reboot')
            # indent records emitted while the reboot is in progress
            self._record_prefix += '\t'
            reboot_func()
        except Exception, e:
            self._record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # capture the kernel we booted into for the status log
            kernel = get_kernel_func()
            self._record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
625
626
jadmanskie432dd22009-01-30 15:04:51 +0000627 def run_control(self, path):
628 """Execute a control file found at path (relative to the autotest
629 path). Intended for executing a control file within a control file,
630 not for running the top-level job control file."""
631 path = os.path.join(self.autodir, path)
632 control_file = self._load_control_file(path)
mbligh0d0f67d2009-11-06 03:15:03 +0000633 self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
jadmanskie432dd22009-01-30 15:04:51 +0000634
635
jadmanskic09fc152008-10-15 17:56:59 +0000636 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000637 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000638 on_every_test)
639
640
641 def add_sysinfo_logfile(self, file, on_every_test=False):
642 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
643
644
645 def _add_sysinfo_loggable(self, loggable, on_every_test):
646 if on_every_test:
647 self.sysinfo.test_loggables.add(loggable)
648 else:
649 self.sysinfo.boot_loggables.add(loggable)
650
651
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed sucessfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self._record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self._record_prefix
        try:
            # warnings emitted just before an END line belong inside the
            # group being ended, so indent them one extra level
            if status_code.startswith("END "):
                self._record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self._record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
699
700
701 def _read_warnings(self):
jadmanskif37df842009-02-11 00:03:26 +0000702 """Poll all the warning loggers and extract any new warnings that have
703 been logged. If the warnings belong to a category that is currently
704 disabled, this method will discard them and they will no longer be
705 retrievable.
706
707 Returns a list of (timestamp, message) tuples, where timestamp is an
708 integer epoch timestamp."""
jadmanski10646442008-08-13 14:05:21 +0000709 warnings = []
710 while True:
711 # pull in a line of output from every logger that has
712 # output ready to be read
mbligh2b92b862008-11-22 13:25:32 +0000713 loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
jadmanski10646442008-08-13 14:05:21 +0000714 closed_loggers = set()
715 for logger in loggers:
716 line = logger.readline()
717 # record any broken pipes (aka line == empty)
718 if len(line) == 0:
719 closed_loggers.add(logger)
720 continue
jadmanskif37df842009-02-11 00:03:26 +0000721 # parse out the warning
722 timestamp, msgtype, msg = line.split('\t', 2)
723 timestamp = int(timestamp)
724 # if the warning is valid, add it to the results
725 if self.warning_manager.is_valid(timestamp, msgtype):
726 warnings.append((timestamp, msg.strip()))
jadmanski10646442008-08-13 14:05:21 +0000727
728 # stop listening to loggers that are closed
729 self.warning_loggers -= closed_loggers
730
731 # stop if none of the loggers have any output left
732 if not loggers:
733 break
734
735 # sort into timestamp order
736 warnings.sort()
737 return warnings
738
739
jadmanski16a7ff72009-04-01 18:19:53 +0000740 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000741 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000742 self.record("INFO", None, None,
743 "disabling %s warnings" % warning_type,
744 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000745
746
jadmanski16a7ff72009-04-01 18:19:53 +0000747 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000748 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000749 self.record("INFO", None, None,
750 "enabling %s warnings" % warning_type,
751 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000752
753
jadmanski779bd292009-03-19 17:33:33 +0000754 def get_status_log_path(self, subdir=None):
755 """Return the path to the job status log.
756
757 @param subdir - Optional paramter indicating that you want the path
758 to a subdirectory status log.
759
760 @returns The path where the status log should be.
761 """
mbligh210bae62009-04-01 18:33:13 +0000762 if self.resultdir:
763 if subdir:
764 return os.path.join(self.resultdir, subdir, "status.log")
765 else:
766 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000767 else:
mbligh210bae62009-04-01 18:33:13 +0000768 return None
jadmanski779bd292009-03-19 17:33:33 +0000769
770
jadmanski6bb32d72009-03-19 20:25:24 +0000771 def _update_uncollected_logs_list(self, update_func):
772 """Updates the uncollected logs list in a multi-process safe manner.
773
774 @param update_func - a function that updates the list of uncollected
775 logs. Should take one parameter, the list to be updated.
776 """
mbligh0d0f67d2009-11-06 03:15:03 +0000777 if self._uncollected_log_file:
778 log_file = open(self._uncollected_log_file, "r+")
mbligha788dc42009-03-26 21:10:16 +0000779 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000780 try:
781 uncollected_logs = pickle.load(log_file)
782 update_func(uncollected_logs)
783 log_file.seek(0)
784 log_file.truncate()
785 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000786 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000787 finally:
788 fcntl.flock(log_file, fcntl.LOCK_UN)
789 log_file.close()
790
791
792 def add_client_log(self, hostname, remote_path, local_path):
793 """Adds a new set of client logs to the list of uncollected logs,
794 to allow for future log recovery.
795
796 @param host - the hostname of the machine holding the logs
797 @param remote_path - the directory on the remote machine holding logs
798 @param local_path - the local directory to copy the logs into
799 """
800 def update_func(logs_list):
801 logs_list.append((hostname, remote_path, local_path))
802 self._update_uncollected_logs_list(update_func)
803
804
805 def remove_client_log(self, hostname, remote_path, local_path):
806 """Removes a set of client logs from the list of uncollected logs,
807 to allow for future log recovery.
808
809 @param host - the hostname of the machine holding the logs
810 @param remote_path - the directory on the remote machine holding logs
811 @param local_path - the local directory to copy the logs into
812 """
813 def update_func(logs_list):
814 logs_list.remove((hostname, remote_path, local_path))
815 self._update_uncollected_logs_list(update_func)
816
817
mbligh0d0f67d2009-11-06 03:15:03 +0000818 def get_client_logs(self):
819 """Retrieves the list of uncollected logs, if it exists.
820
821 @returns A list of (host, remote_path, local_path) tuples. Returns
822 an empty list if no uncollected logs file exists.
823 """
824 log_exists = (self._uncollected_log_file and
825 os.path.exists(self._uncollected_log_file))
826 if log_exists:
827 return pickle.load(open(self._uncollected_log_file))
828 else:
829 return []
830
831
jadmanski10646442008-08-13 14:05:21 +0000832 def _render_record(self, status_code, subdir, operation, status='',
833 epoch_time=None, record_prefix=None,
834 optional_fields=None):
835 """
836 Internal Function to generate a record to be written into a
837 status log. For use by server_job.* classes only.
838 """
839 if subdir:
840 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000841 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000842 substr = subdir
843 else:
844 substr = '----'
845
mbligh1b3b3762008-09-25 02:46:34 +0000846 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000847 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000848 if not operation:
849 operation = '----'
850 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000851 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000852 operation = operation.rstrip()
853 status = status.rstrip()
854 status = re.sub(r"\t", " ", status)
855 # Ensure any continuation lines are marked so we can
856 # detect them in the status file to ensure it is parsable.
mbligh0d0f67d2009-11-06 03:15:03 +0000857 status = re.sub(r"\n", "\n" + self._record_prefix + " ", status)
jadmanski10646442008-08-13 14:05:21 +0000858
859 if not optional_fields:
860 optional_fields = {}
861
862 # Generate timestamps for inclusion in the logs
863 if epoch_time is None:
864 epoch_time = int(time.time())
865 local_time = time.localtime(epoch_time)
866 optional_fields["timestamp"] = str(epoch_time)
867 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
868 local_time)
869
870 fields = [status_code, substr, operation]
871 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
872 fields.append(status)
873
874 if record_prefix is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000875 record_prefix = self._record_prefix
jadmanski10646442008-08-13 14:05:21 +0000876
877 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000878 return record_prefix + msg + '\n'
879
880
881 def _record_prerendered(self, msg):
882 """
883 Record a pre-rendered msg into the status logs. The only
884 change this makes to the message is to add on the local
885 indentation. Should not be called outside of server_job.*
886 classes. Unlike _record, this does not write the message
887 to standard output.
888 """
889 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000890 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000891 status_log = open(status_file, 'a')
892 for line in msg.splitlines():
mbligh0d0f67d2009-11-06 03:15:03 +0000893 line = self._record_prefix + line + '\n'
jadmanski10646442008-08-13 14:05:21 +0000894 lines.append(line)
895 status_log.write(line)
896 status_log.close()
897 self.__parse_status(lines)
898
899
    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean. If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name. If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            # __import__ with a non-empty fromlist returns the leaf module
            module = __import__(module_name, {}, {}, names)

            # No names supplied? Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                # walk down to the leaf submodule, since __import__ with an
                # empty fromlist returns the top-level package
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                # bind the name; on a benign duplicate import this simply
                # rebinds the very same object
                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys, logging=logging)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job. Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
982
983
984 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000985 """
986 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000987
988 Unless protect_namespace is explicitly set to False, the dict will not
989 be modified.
990
991 Args:
992 code_file: The filename of the control file to execute.
993 namespace: A dict containing names to make available during execution.
994 protect: Boolean. If True (the default) a copy of the namespace dict
995 is used during execution to prevent the code from modifying its
996 contents outside of this function. If False the raw dict is
997 passed in and modifications will be allowed.
998 """
999 if protect:
1000 namespace = namespace.copy()
1001 self._fill_server_control_namespace(namespace, protect=protect)
1002 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +00001003 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +00001004 machines_text = '\n'.join(self.machines) + '\n'
1005 # Only rewrite the file if it does not match our machine list.
1006 try:
1007 machines_f = open(MACHINES_FILENAME, 'r')
1008 existing_machines_text = machines_f.read()
1009 machines_f.close()
1010 except EnvironmentError:
1011 existing_machines_text = None
1012 if machines_text != existing_machines_text:
1013 utils.open_write_close(MACHINES_FILENAME, machines_text)
1014 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001015
1016
1017 def _record(self, status_code, subdir, operation, status='',
1018 epoch_time=None, optional_fields=None):
1019 """
1020 Actual function for recording a single line into the status
1021 logs. Should never be called directly, only by job.record as
1022 this would bypass the console monitor logging.
1023 """
1024
mbligh2b92b862008-11-22 13:25:32 +00001025 msg = self._render_record(status_code, subdir, operation, status,
1026 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001027
jadmanski779bd292009-03-19 17:33:33 +00001028 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001029 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001030 if status_file:
1031 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001032 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001033 sub_status_file = self.get_status_log_path(subdir)
1034 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001035 self.__parse_status(msg.splitlines())
1036
1037
1038 def __parse_status(self, new_lines):
mbligh0d0f67d2009-11-06 03:15:03 +00001039 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +00001040 return
1041 new_tests = self.parser.process_lines(new_lines)
1042 for test in new_tests:
1043 self.__insert_test(test)
1044
1045
1046 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001047 """
1048 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001049 database. This method will not raise an exception, even if an
1050 error occurs during the insert, to avoid failing a test
1051 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001052 self.num_tests_run += 1
1053 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1054 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001055 try:
1056 self.results_db.insert_test(self.job_model, test)
1057 except Exception:
1058 msg = ("WARNING: An unexpected error occured while "
1059 "inserting test results into the database. "
1060 "Ignoring error.\n" + traceback.format_exc())
1061 print >> sys.stderr, msg
1062
mblighcaa62c22008-04-07 21:51:17 +00001063
# Site customization hook: look up "site_server_job" in
# autotest_lib.server.site_server_job, passing base_server_job as the
# default class (presumably returned when no site module is installed --
# see utils.import_site_class for the exact fallback semantics).
site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001067
class server_job(site_server_job):
    """The server job class used by callers of this module; inherits any
    site-specific behavior supplied through site_server_job."""
    pass
jadmanskif37df842009-02-11 00:03:26 +00001070
1071
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings, tracking for each warning type the time intervals during
    which it was disabled."""

    def __init__(self):
        # maps warning type -> list of (start, end) epoch-time intervals
        # during which the type was disabled; end is None while the
        # interval is still open
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occured and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occured."""
        intervals = self.disabled_warnings.get(warning_type, [])
        return not any(
            timestamp >= start and (end is None or timestamp < end)
            for start, end in intervals)


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        history = self.disabled_warnings.setdefault(warning_type, [])
        # only open a new interval if there is no interval already open
        if not history or history[-1][1] is not None:
            history.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        history = self.disabled_warnings.get(warning_type, [])
        # close the open interval, if any, at the current time
        if history and history[-1][1] is None:
            history[-1] = (history[-1][0], int(current_time_func()))