blob: 97a6b6d58348069574217ec3c1cb55423c9bb81b [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh0d0f67d2009-11-06 03:15:03 +000013from autotest_lib.client.common_lib import base_job
mbligh09108442008-10-15 16:27:38 +000014from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000015from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000016from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000017from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000018
19
mbligh084bc172008-10-18 14:02:45 +000020def _control_segment_path(name):
21 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000022 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000023 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000024
25
# filenames used for the control files staged into a job's control dir
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# canned control segments run around the user's control file: the client
# wrapper, crash collection, machine install and cleanup steps
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

# control segments used by the standalone verify/repair entry points
VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000038
39
mbligh062ed152009-01-13 00:57:14 +000040# by default provide a stub that generates no site data
41def _get_site_job_data_dummy(job):
42 return {}
43
44
# load up site-specific code for generating site-specific job data;
# falls back to the no-op stub above when no site module is installed
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000049
50
mbligh0d0f67d2009-11-06 03:15:03 +000051class base_server_job(base_job.base_job):
52 """The server-side concrete implementation of base_job.
jadmanski10646442008-08-13 14:05:21 +000053
mbligh0d0f67d2009-11-06 03:15:03 +000054 Optional properties provided by this implementation:
55 serverdir
56 conmuxdir
57
58 num_tests_run
59 num_tests_failed
60
61 warning_manager
62 warning_loggers
jadmanski10646442008-08-13 14:05:21 +000063 """
64
mbligh0d0f67d2009-11-06 03:15:03 +000065 _STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000066
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param machines: A list of hostnames the job will run against.
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        """
        super(base_server_job, self).__init__(resultdir=resultdir)

        path = os.path.dirname(__file__)
        self.control = control
        # marker file listing logs not yet pulled back from the machines;
        # its presence also tells a crashinfo-only rerun there is work to do
        self._uncollected_log_file = os.path.join(self.resultdir,
                                                  'uncollected_logs')
        debugdir = os.path.join(self.resultdir, 'debug')
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        # fall back to the invoking user when the caller supplied none
        if user:
            self.user = user
        else:
            self.user = getpass.getuser()

        self._args = args
        self.machines = machines
        self._client = client
        # indentation prefix prepended to status.log lines for grouping
        self._record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so all output lands in the managed logs
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self._STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name

        # only write these keyvals out on the first job in a resultdir
        if 'job_started' not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        self._parse_job = parse_job
        # continuous parsing only works for single-machine jobs; multi-machine
        # jobs re-enable it per machine inside _make_parallel_wrapper
        self._using_parser = (self._parse_job and len(machines) == 1)
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        # should tell us if this job results are inside a machine named
        # directory
        self.in_machine_dir = False

        self._register_subcommand_hooks()
        self._test_tag_prefix = None

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None
156
157 @classmethod
158 def _find_base_directories(cls):
159 """
160 Determine locations of autodir, clientdir and serverdir. Assumes
161 that this file is located within serverdir and uses __file__ along
162 with relative paths to resolve the location.
163 """
164 serverdir = os.path.abspath(os.path.dirname(__file__))
165 autodir = os.path.normpath(os.path.join(serverdir, '..'))
166 clientdir = os.path.join(autodir, 'client')
167 return autodir, clientdir, serverdir
168
169
170 def _find_resultdir(self, resultdir):
171 """
172 Determine the location of resultdir. For server jobs we expect one to
173 always be explicitly passed in to __init__, so just return that.
174 """
175 if resultdir:
176 return os.path.normpath(resultdir)
177 else:
178 return None
179
jadmanski550fdc22008-11-20 16:32:08 +0000180
jadmanskie432dd22009-01-30 15:04:51 +0000181 @staticmethod
182 def _load_control_file(path):
183 f = open(path)
184 try:
185 control_file = f.read()
186 finally:
187 f.close()
188 return re.sub('\r', '', control_file)
189
190
    def _register_subcommand_hooks(self):
        """
        Register some hooks into the subcommand modules that allow us
        to properly clean up self.hosts created in forked subprocesses.
        """
        # snapshot the host set at fork time...
        def on_fork(cmd):
            self._existing_hosts_on_fork = set(self.hosts)
        # ...so that on join we can close only the hosts the child created
        def on_join(cmd):
            new_hosts = self.hosts - self._existing_hosts_on_fork
            for host in new_hosts:
                host.close()
        subcommand.subcommand.register_fork_hook(on_fork)
        subcommand.subcommand.register_join_hook(on_join)
204
jadmanski10646442008-08-13 14:05:21 +0000205
    def init_parser(self):
        """
        Start the continuous parsing of self.resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        No-op unless self._using_parser is set (single-machine job with a
        parse_job tag, or a per-machine fork of a multi-machine job).
        """
        if not self._using_parser:
            return
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(self.resultdir, '.parse.log')
        # buffering=0 (unbuffered) so debug output survives a crash
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self._STATUS_VERSION)
        self.job_model = self.parser.make_job(self.resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self._parse_job)
        if job_idx is None:
            self.results_db.insert_job(self._parse_job, self.job_model)
        else:
            # job already present: just attach the existing db indices
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
232
233
    def cleanup_parser(self):
        """
        This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)

        Safe to call when the parser was never started; idempotent because
        it clears self._using_parser on completion.
        """
        if not self._using_parser:
            return
        # flush any tests the parser is still holding into the results db
        final_tests = self.parser.end()
        for test in final_tests:
            self.__insert_test(test)
        self._using_parser = False
jadmanski10646442008-08-13 14:05:21 +0000246
247
    def verify(self):
        """
        Run the verify control segment against self.machines.

        @raises error.AutoservError: If no machines were specified.
        @raises Exception: Re-raises whatever the verify control segment
                raised, after recording an ABORT in the status log.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self._ssh_user,
                         'ssh_port' : self._ssh_port,
                         'ssh_pass' : self._ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            # record the failure for the parser before propagating it
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
263
264
    def repair(self, host_protection):
        """
        Run the repair control segment against self.machines.

        @param host_protection: The protection level to expose to the repair
                control segment as 'protection_level'.
        @raises error.AutoservError: If no machines were specified.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
                     'ssh_pass': self._ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000276
277
278 def precheck(self):
279 """
280 perform any additional checks in derived classes.
281 """
282 pass
283
284
285 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000286 """
287 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000288 """
289 pass
290
291
292 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000293 """
294 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000295 """
296 pass
297
298
299 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000300 """
301 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000302 """
303 return False
304
305
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        Returns either |function| unchanged, or a closure that sets up a
        per-machine results directory (and, when parsing is enabled, a
        per-machine parser) before invoking it. The self-mutations inside
        the wrappers happen in the forked subcommand children — presumably
        they do not affect the parent job object; TODO confirm against
        subcommand.parallel_simple.
        """
        # single-machine jobs running on their own machine don't fork
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self._parse_job and is_forking and log:
            # forked, parsed run: give each machine its own execution
            # context, keyvals and parser
            def wrapper(machine):
                self._parse_job += "/" + machine
                self._using_parser = True
                self.machines = [machine]
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser()
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            # forked, unparsed run: still set up a per-machine resultdir
            def wrapper(machine):
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                self.in_machine_dir = True
                machine_data = {'hostname' : machine,
                                'status_version' : str(self._STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        return wrapper
335
336
    def parallel_simple(self, function, machines, log=True, timeout=None,
                        return_results=False):
        """
        Run 'function' using parallel_simple, with an extra wrapper to handle
        the necessary setup for continuous parsing, if possible. If continuous
        parsing is already properly initialized then this should just work.

        @param function: A callable to run in parallel given each machine.
        @param machines: A list of machine names to be passed one per subcommand
                invocation of function.
        @param log: If True, output will be written to output in a subdirectory
                named after each machine.
        @param timeout: Seconds after which the function call should timeout.
        @param return_results: If True instead of an AutoServError being raised
                on any error a list of the results|exceptions from the function
                called on each arg is returned.  [default: False]

        @returns Whatever subcommand.parallel_simple returns (a list of
                results/exceptions when return_results is True).
        @raises error.AutotestError: If any of the functions failed.
        """
        # wrap for per-machine resultdir/parser setup, then delegate
        wrapper = self._make_parallel_wrapper(function, machines, log)
        return subcommand.parallel_simple(wrapper, machines,
                                          log=log, timeout=timeout,
                                          return_results=return_results)
360
361
362 def parallel_on_machines(self, function, machines, timeout=None):
363 """
showardcd5fac42009-07-06 20:19:43 +0000364 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000365 @param machines: A list of machines to call function(machine) on.
366 @param timeout: Seconds after which the function call should timeout.
367
368 @returns A list of machines on which function(machine) returned
369 without raising an exception.
370 """
showardcd5fac42009-07-06 20:19:43 +0000371 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000372 return_results=True)
373 success_machines = []
374 for result, machine in itertools.izip(results, machines):
375 if not isinstance(result, Exception):
376 success_machines.append(machine)
377 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000378
379
mbligh0d0f67d2009-11-06 03:15:03 +0000380 _USE_TEMP_DIR = object()
mbligh2b92b862008-11-22 13:25:32 +0000381 def run(self, cleanup=False, install_before=False, install_after=False,
jadmanskie432dd22009-01-30 15:04:51 +0000382 collect_crashdumps=True, namespace={}, control=None,
jadmanskidef0c3c2009-03-25 20:07:10 +0000383 control_file_dir=None, only_collect_crashinfo=False):
jadmanskifb9c0fa2009-04-29 17:39:16 +0000384 # for a normal job, make sure the uncollected logs file exists
385 # for a crashinfo-only run it should already exist, bail out otherwise
mbligh0d0f67d2009-11-06 03:15:03 +0000386 if self.resultdir and not os.path.exists(self._uncollected_log_file):
jadmanskifb9c0fa2009-04-29 17:39:16 +0000387 if only_collect_crashinfo:
388 # if this is a crashinfo-only run, and there were no existing
389 # uncollected logs, just bail out early
390 logging.info("No existing uncollected logs, "
391 "skipping crashinfo collection")
392 return
393 else:
mbligh0d0f67d2009-11-06 03:15:03 +0000394 log_file = open(self._uncollected_log_file, "w")
jadmanskifb9c0fa2009-04-29 17:39:16 +0000395 pickle.dump([], log_file)
396 log_file.close()
397
jadmanski10646442008-08-13 14:05:21 +0000398 # use a copy so changes don't affect the original dictionary
399 namespace = namespace.copy()
400 machines = self.machines
jadmanskie432dd22009-01-30 15:04:51 +0000401 if control is None:
jadmanski02a3ba22009-11-13 20:47:27 +0000402 if self.control is None:
403 control = ''
404 else:
405 control = self._load_control_file(self.control)
jadmanskie432dd22009-01-30 15:04:51 +0000406 if control_file_dir is None:
407 control_file_dir = self.resultdir
jadmanski10646442008-08-13 14:05:21 +0000408
409 self.aborted = False
410 namespace['machines'] = machines
mbligh0d0f67d2009-11-06 03:15:03 +0000411 namespace['args'] = self._args
jadmanski10646442008-08-13 14:05:21 +0000412 namespace['job'] = self
mbligh0d0f67d2009-11-06 03:15:03 +0000413 namespace['ssh_user'] = self._ssh_user
414 namespace['ssh_port'] = self._ssh_port
415 namespace['ssh_pass'] = self._ssh_pass
jadmanski10646442008-08-13 14:05:21 +0000416 test_start_time = int(time.time())
417
mbligh80e1eba2008-11-19 00:26:18 +0000418 if self.resultdir:
419 os.chdir(self.resultdir)
jadmanski779bd292009-03-19 17:33:33 +0000420 # touch status.log so that the parser knows a job is running here
jadmanski382303a2009-04-21 19:53:39 +0000421 open(self.get_status_log_path(), 'a').close()
mbligh80e1eba2008-11-19 00:26:18 +0000422 self.enable_external_logging()
jadmanskie432dd22009-01-30 15:04:51 +0000423
jadmanskicdd0c402008-09-19 21:21:31 +0000424 collect_crashinfo = True
mblighaebe3b62008-12-22 14:45:40 +0000425 temp_control_file_dir = None
jadmanski10646442008-08-13 14:05:21 +0000426 try:
showardcf8d4922009-10-14 16:08:39 +0000427 try:
428 if install_before and machines:
429 self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanskie432dd22009-01-30 15:04:51 +0000430
showardcf8d4922009-10-14 16:08:39 +0000431 if only_collect_crashinfo:
432 return
433
jadmanskidef0c3c2009-03-25 20:07:10 +0000434 # determine the dir to write the control files to
435 cfd_specified = (control_file_dir
mbligh0d0f67d2009-11-06 03:15:03 +0000436 and control_file_dir is not self._USE_TEMP_DIR)
jadmanskidef0c3c2009-03-25 20:07:10 +0000437 if cfd_specified:
438 temp_control_file_dir = None
439 else:
440 temp_control_file_dir = tempfile.mkdtemp(
441 suffix='temp_control_file_dir')
442 control_file_dir = temp_control_file_dir
443 server_control_file = os.path.join(control_file_dir,
444 SERVER_CONTROL_FILENAME)
445 client_control_file = os.path.join(control_file_dir,
446 CLIENT_CONTROL_FILENAME)
mbligh0d0f67d2009-11-06 03:15:03 +0000447 if self._client:
jadmanskidef0c3c2009-03-25 20:07:10 +0000448 namespace['control'] = control
449 utils.open_write_close(client_control_file, control)
mblighfeac0102009-04-28 18:31:12 +0000450 shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
451 server_control_file)
jadmanskidef0c3c2009-03-25 20:07:10 +0000452 else:
453 utils.open_write_close(server_control_file, control)
mbligh26f0d882009-06-22 18:30:01 +0000454 logging.info("Processing control file")
jadmanskidef0c3c2009-03-25 20:07:10 +0000455 self._execute_code(server_control_file, namespace)
mbligh26f0d882009-06-22 18:30:01 +0000456 logging.info("Finished processing control file")
jadmanski10646442008-08-13 14:05:21 +0000457
jadmanskidef0c3c2009-03-25 20:07:10 +0000458 # no error occured, so we don't need to collect crashinfo
459 collect_crashinfo = False
showardcf8d4922009-10-14 16:08:39 +0000460 except:
461 try:
462 logging.exception(
463 'Exception escaped control file, job aborting:')
464 except:
465 pass # don't let logging exceptions here interfere
466 raise
jadmanski10646442008-08-13 14:05:21 +0000467 finally:
mblighaebe3b62008-12-22 14:45:40 +0000468 if temp_control_file_dir:
jadmanskie432dd22009-01-30 15:04:51 +0000469 # Clean up temp directory used for copies of the control files
mblighaebe3b62008-12-22 14:45:40 +0000470 try:
471 shutil.rmtree(temp_control_file_dir)
472 except Exception, e:
mblighe7d9c602009-07-02 19:02:33 +0000473 logging.warn('Could not remove temp directory %s: %s',
474 temp_control_file_dir, e)
jadmanskie432dd22009-01-30 15:04:51 +0000475
jadmanskicdd0c402008-09-19 21:21:31 +0000476 if machines and (collect_crashdumps or collect_crashinfo):
jadmanski10646442008-08-13 14:05:21 +0000477 namespace['test_start_time'] = test_start_time
jadmanskicdd0c402008-09-19 21:21:31 +0000478 if collect_crashinfo:
mbligh084bc172008-10-18 14:02:45 +0000479 # includes crashdumps
480 self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
jadmanskicdd0c402008-09-19 21:21:31 +0000481 else:
mbligh084bc172008-10-18 14:02:45 +0000482 self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
mbligh0d0f67d2009-11-06 03:15:03 +0000483 if self._uncollected_log_file:
484 os.remove(self._uncollected_log_file)
jadmanski10646442008-08-13 14:05:21 +0000485 self.disable_external_logging()
showard45ae8192008-11-05 19:32:53 +0000486 if cleanup and machines:
487 self._execute_code(CLEANUP_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000488 if install_after and machines:
mbligh084bc172008-10-18 14:02:45 +0000489 self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000490
491
mbligh7eacbc22009-07-28 23:13:56 +0000492 def set_test_tag_prefix(self, tag=''):
493 """
494 Set tag to be prepended (separated by a '.') to test name of all
495 following run_test steps.
496 """
497 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000498
499
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        @returns True when the test ran successfully, False when it failed
                with a TestBaseException (which was recorded); any other
                exception escaping the test is re-raised.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # combine the explicit tag with any job-wide test tag prefix
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self._test_tag_prefix
        elif self._test_tag_prefix:
            tag = '%s.%s' % (self._test_tag_prefix, tag)

        if tag:
            testname += '.' + str(tag)
        subdir = testname

        # refuse to overwrite results from a previous run of the same test
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # run the test, recording GOOD/FAIL/exit_status in the status
            # log before letting any exception propagate to _run_group
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            # test-level failures are reported via the return value
            return False
        elif exc_info:
            # re-raise non-test exceptions with the original traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000549
550
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END entries around function(*args, **dargs) and
        indents nested records while it runs. Returns (result, exc_info);
        exc_info is non-None only for TestBaseException failures, which
        are captured rather than raised. Any other exception is recorded
        as END ABORT and re-raised as a JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self._record_prefix
        try:
            self.record('START', subdir, name)
            self._record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # always restore indentation before writing the END record
                self._record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000576
577
578 def run_group(self, function, *args, **dargs):
579 """\
580 function:
581 subroutine to run
582 *args:
583 arguments for the function
584 """
585
586 name = function.__name__
587
588 # Allow the tag for the group to be specified.
589 tag = dargs.pop('tag', None)
590 if tag:
591 name = tag
592
jadmanskide292df2008-08-26 20:51:14 +0000593 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000594
595
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self._record_prefix
        try:
            self.record('START', None, 'reboot')
            self._record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore indentation, record the failure, then propagate
            self._record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # capture the post-reboot kernel for the END record
            kernel = get_kernel_func()
            self._record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
623
624
jadmanskie432dd22009-01-30 15:04:51 +0000625 def run_control(self, path):
626 """Execute a control file found at path (relative to the autotest
627 path). Intended for executing a control file within a control file,
628 not for running the top-level job control file."""
629 path = os.path.join(self.autodir, path)
630 control_file = self._load_control_file(path)
mbligh0d0f67d2009-11-06 03:15:03 +0000631 self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
jadmanskie432dd22009-01-30 15:04:51 +0000632
633
jadmanskic09fc152008-10-15 17:56:59 +0000634 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000635 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000636 on_every_test)
637
638
639 def add_sysinfo_logfile(self, file, on_every_test=False):
640 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
641
642
643 def _add_sysinfo_loggable(self, loggable, on_every_test):
644 if on_every_test:
645 self.sysinfo.test_loggables.add(loggable)
646 else:
647 self.sysinfo.boot_loggables.add(loggable)
648
649
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self._record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self._record_prefix
        try:
            # indent warnings one extra level for END records so they nest
            # inside the group being closed
            if status_code.startswith("END "):
                self._record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self._record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
697
698
699 def _read_warnings(self):
jadmanskif37df842009-02-11 00:03:26 +0000700 """Poll all the warning loggers and extract any new warnings that have
701 been logged. If the warnings belong to a category that is currently
702 disabled, this method will discard them and they will no longer be
703 retrievable.
704
705 Returns a list of (timestamp, message) tuples, where timestamp is an
706 integer epoch timestamp."""
jadmanski10646442008-08-13 14:05:21 +0000707 warnings = []
708 while True:
709 # pull in a line of output from every logger that has
710 # output ready to be read
mbligh2b92b862008-11-22 13:25:32 +0000711 loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
jadmanski10646442008-08-13 14:05:21 +0000712 closed_loggers = set()
713 for logger in loggers:
714 line = logger.readline()
715 # record any broken pipes (aka line == empty)
716 if len(line) == 0:
717 closed_loggers.add(logger)
718 continue
jadmanskif37df842009-02-11 00:03:26 +0000719 # parse out the warning
720 timestamp, msgtype, msg = line.split('\t', 2)
721 timestamp = int(timestamp)
722 # if the warning is valid, add it to the results
723 if self.warning_manager.is_valid(timestamp, msgtype):
724 warnings.append((timestamp, msg.strip()))
jadmanski10646442008-08-13 14:05:21 +0000725
726 # stop listening to loggers that are closed
727 self.warning_loggers -= closed_loggers
728
729 # stop if none of the loggers have any output left
730 if not loggers:
731 break
732
733 # sort into timestamp order
734 warnings.sort()
735 return warnings
736
737
jadmanski16a7ff72009-04-01 18:19:53 +0000738 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000739 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000740 self.record("INFO", None, None,
741 "disabling %s warnings" % warning_type,
742 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000743
744
jadmanski16a7ff72009-04-01 18:19:53 +0000745 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000746 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000747 self.record("INFO", None, None,
748 "enabling %s warnings" % warning_type,
749 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000750
751
jadmanski779bd292009-03-19 17:33:33 +0000752 def get_status_log_path(self, subdir=None):
753 """Return the path to the job status log.
754
755 @param subdir - Optional paramter indicating that you want the path
756 to a subdirectory status log.
757
758 @returns The path where the status log should be.
759 """
mbligh210bae62009-04-01 18:33:13 +0000760 if self.resultdir:
761 if subdir:
762 return os.path.join(self.resultdir, subdir, "status.log")
763 else:
764 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000765 else:
mbligh210bae62009-04-01 18:33:13 +0000766 return None
jadmanski779bd292009-03-19 17:33:33 +0000767
768
jadmanski6bb32d72009-03-19 20:25:24 +0000769 def _update_uncollected_logs_list(self, update_func):
770 """Updates the uncollected logs list in a multi-process safe manner.
771
772 @param update_func - a function that updates the list of uncollected
773 logs. Should take one parameter, the list to be updated.
774 """
mbligh0d0f67d2009-11-06 03:15:03 +0000775 if self._uncollected_log_file:
776 log_file = open(self._uncollected_log_file, "r+")
mbligha788dc42009-03-26 21:10:16 +0000777 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000778 try:
779 uncollected_logs = pickle.load(log_file)
780 update_func(uncollected_logs)
781 log_file.seek(0)
782 log_file.truncate()
783 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000784 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000785 finally:
786 fcntl.flock(log_file, fcntl.LOCK_UN)
787 log_file.close()
788
789
790 def add_client_log(self, hostname, remote_path, local_path):
791 """Adds a new set of client logs to the list of uncollected logs,
792 to allow for future log recovery.
793
794 @param host - the hostname of the machine holding the logs
795 @param remote_path - the directory on the remote machine holding logs
796 @param local_path - the local directory to copy the logs into
797 """
798 def update_func(logs_list):
799 logs_list.append((hostname, remote_path, local_path))
800 self._update_uncollected_logs_list(update_func)
801
802
803 def remove_client_log(self, hostname, remote_path, local_path):
804 """Removes a set of client logs from the list of uncollected logs,
805 to allow for future log recovery.
806
807 @param host - the hostname of the machine holding the logs
808 @param remote_path - the directory on the remote machine holding logs
809 @param local_path - the local directory to copy the logs into
810 """
811 def update_func(logs_list):
812 logs_list.remove((hostname, remote_path, local_path))
813 self._update_uncollected_logs_list(update_func)
814
815
mbligh0d0f67d2009-11-06 03:15:03 +0000816 def get_client_logs(self):
817 """Retrieves the list of uncollected logs, if it exists.
818
819 @returns A list of (host, remote_path, local_path) tuples. Returns
820 an empty list if no uncollected logs file exists.
821 """
822 log_exists = (self._uncollected_log_file and
823 os.path.exists(self._uncollected_log_file))
824 if log_exists:
825 return pickle.load(open(self._uncollected_log_file))
826 else:
827 return []
828
829
jadmanski10646442008-08-13 14:05:21 +0000830 def _render_record(self, status_code, subdir, operation, status='',
831 epoch_time=None, record_prefix=None,
832 optional_fields=None):
833 """
834 Internal Function to generate a record to be written into a
835 status log. For use by server_job.* classes only.
836 """
837 if subdir:
838 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000839 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000840 substr = subdir
841 else:
842 substr = '----'
843
mbligh1b3b3762008-09-25 02:46:34 +0000844 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000845 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000846 if not operation:
847 operation = '----'
848 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000849 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000850 operation = operation.rstrip()
851 status = status.rstrip()
852 status = re.sub(r"\t", " ", status)
853 # Ensure any continuation lines are marked so we can
854 # detect them in the status file to ensure it is parsable.
mbligh0d0f67d2009-11-06 03:15:03 +0000855 status = re.sub(r"\n", "\n" + self._record_prefix + " ", status)
jadmanski10646442008-08-13 14:05:21 +0000856
857 if not optional_fields:
858 optional_fields = {}
859
860 # Generate timestamps for inclusion in the logs
861 if epoch_time is None:
862 epoch_time = int(time.time())
863 local_time = time.localtime(epoch_time)
864 optional_fields["timestamp"] = str(epoch_time)
865 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
866 local_time)
867
868 fields = [status_code, substr, operation]
869 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
870 fields.append(status)
871
872 if record_prefix is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000873 record_prefix = self._record_prefix
jadmanski10646442008-08-13 14:05:21 +0000874
875 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000876 return record_prefix + msg + '\n'
877
878
879 def _record_prerendered(self, msg):
880 """
881 Record a pre-rendered msg into the status logs. The only
882 change this makes to the message is to add on the local
883 indentation. Should not be called outside of server_job.*
884 classes. Unlike _record, this does not write the message
885 to standard output.
886 """
887 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000888 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000889 status_log = open(status_file, 'a')
890 for line in msg.splitlines():
mbligh0d0f67d2009-11-06 03:15:03 +0000891 line = self._record_prefix + line + '\n'
jadmanski10646442008-08-13 14:05:21 +0000892 lines.append(line)
893 status_log.write(line)
894 status_log.close()
895 self.__parse_status(lines)
896
897
mbligh084bc172008-10-18 14:02:45 +0000898 def _fill_server_control_namespace(self, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000899 """
900 Prepare a namespace to be used when executing server control files.
mbligh084bc172008-10-18 14:02:45 +0000901
902 This sets up the control file API by importing modules and making them
903 available under the appropriate names within namespace.
904
905 For use by _execute_code().
906
907 Args:
908 namespace: The namespace dictionary to fill in.
909 protect: Boolean. If True (the default) any operation that would
910 clobber an existing entry in namespace will cause an error.
911 Raises:
912 error.AutoservError: When a name would be clobbered by import.
913 """
914 def _import_names(module_name, names=()):
mbligh2b92b862008-11-22 13:25:32 +0000915 """
916 Import a module and assign named attributes into namespace.
mbligh084bc172008-10-18 14:02:45 +0000917
918 Args:
919 module_name: The string module name.
920 names: A limiting list of names to import from module_name. If
921 empty (the default), all names are imported from the module
922 similar to a "from foo.bar import *" statement.
923 Raises:
924 error.AutoservError: When a name being imported would clobber
925 a name already in namespace.
926 """
927 module = __import__(module_name, {}, {}, names)
928
929 # No names supplied? Import * from the lowest level module.
930 # (Ugh, why do I have to implement this part myself?)
931 if not names:
932 for submodule_name in module_name.split('.')[1:]:
933 module = getattr(module, submodule_name)
934 if hasattr(module, '__all__'):
935 names = getattr(module, '__all__')
936 else:
937 names = dir(module)
938
939 # Install each name into namespace, checking to make sure it
940 # doesn't override anything that already exists.
941 for name in names:
942 # Check for conflicts to help prevent future problems.
943 if name in namespace and protect:
944 if namespace[name] is not getattr(module, name):
945 raise error.AutoservError('importing name '
946 '%s from %s %r would override %r' %
947 (name, module_name, getattr(module, name),
948 namespace[name]))
949 else:
950 # Encourage cleanliness and the use of __all__ for a
951 # more concrete API with less surprises on '*' imports.
952 warnings.warn('%s (%r) being imported from %s for use '
953 'in server control files is not the '
954 'first occurrance of that import.' %
955 (name, namespace[name], module_name))
956
957 namespace[name] = getattr(module, name)
958
959
960 # This is the equivalent of prepending a bunch of import statements to
961 # the front of the control script.
mbligha2b07dd2009-06-22 18:26:13 +0000962 namespace.update(os=os, sys=sys, logging=logging)
mbligh084bc172008-10-18 14:02:45 +0000963 _import_names('autotest_lib.server',
964 ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
965 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
966 _import_names('autotest_lib.server.subcommand',
967 ('parallel', 'parallel_simple', 'subcommand'))
968 _import_names('autotest_lib.server.utils',
969 ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
970 _import_names('autotest_lib.client.common_lib.error')
971 _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))
972
973 # Inject ourself as the job object into other classes within the API.
974 # (Yuck, this injection is a gross thing be part of a public API. -gps)
975 #
976 # XXX Base & SiteAutotest do not appear to use .job. Who does?
977 namespace['autotest'].Autotest.job = self
978 # server.hosts.base_classes.Host uses .job.
979 namespace['hosts'].Host.job = self
980
981
982 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000983 """
984 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000985
986 Unless protect_namespace is explicitly set to False, the dict will not
987 be modified.
988
989 Args:
990 code_file: The filename of the control file to execute.
991 namespace: A dict containing names to make available during execution.
992 protect: Boolean. If True (the default) a copy of the namespace dict
993 is used during execution to prevent the code from modifying its
994 contents outside of this function. If False the raw dict is
995 passed in and modifications will be allowed.
996 """
997 if protect:
998 namespace = namespace.copy()
999 self._fill_server_control_namespace(namespace, protect=protect)
1000 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +00001001 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +00001002 machines_text = '\n'.join(self.machines) + '\n'
1003 # Only rewrite the file if it does not match our machine list.
1004 try:
1005 machines_f = open(MACHINES_FILENAME, 'r')
1006 existing_machines_text = machines_f.read()
1007 machines_f.close()
1008 except EnvironmentError:
1009 existing_machines_text = None
1010 if machines_text != existing_machines_text:
1011 utils.open_write_close(MACHINES_FILENAME, machines_text)
1012 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001013
1014
1015 def _record(self, status_code, subdir, operation, status='',
1016 epoch_time=None, optional_fields=None):
1017 """
1018 Actual function for recording a single line into the status
1019 logs. Should never be called directly, only by job.record as
1020 this would bypass the console monitor logging.
1021 """
1022
mbligh2b92b862008-11-22 13:25:32 +00001023 msg = self._render_record(status_code, subdir, operation, status,
1024 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001025
jadmanski779bd292009-03-19 17:33:33 +00001026 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001027 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001028 if status_file:
1029 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001030 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001031 sub_status_file = self.get_status_log_path(subdir)
1032 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001033 self.__parse_status(msg.splitlines())
1034
1035
1036 def __parse_status(self, new_lines):
mbligh0d0f67d2009-11-06 03:15:03 +00001037 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +00001038 return
1039 new_tests = self.parser.process_lines(new_lines)
1040 for test in new_tests:
1041 self.__insert_test(test)
1042
1043
1044 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001045 """
1046 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001047 database. This method will not raise an exception, even if an
1048 error occurs during the insert, to avoid failing a test
1049 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001050 self.num_tests_run += 1
1051 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1052 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001053 try:
1054 self.results_db.insert_test(self.job_model, test)
1055 except Exception:
1056 msg = ("WARNING: An unexpected error occured while "
1057 "inserting test results into the database. "
1058 "Ignoring error.\n" + traceback.format_exc())
1059 print >> sys.stderr, msg
1060
mblighcaa62c22008-04-07 21:51:17 +00001061
mbligha7007722009-01-13 00:37:11 +00001062site_server_job = utils.import_site_class(
1063 __file__, "autotest_lib.server.site_server_job", "site_server_job",
1064 base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001065
mbligh0a8c3322009-04-28 18:32:19 +00001066class server_job(site_server_job):
jadmanski0afbb632008-06-06 21:10:57 +00001067 pass
jadmanskif37df842009-02-11 00:03:26 +00001068
1069
class warning_manager(object):
    """Controls warning logs by tracking which warning categories are
    currently enabled.

    Disabling a category opens a time interval during which warnings of
    that type are considered invalid; re-enabling closes the interval.
    A warning is then judged against the intervals that were open at
    the moment it occurred.
    """
    def __init__(self):
        # maps warning type -> list of (start, end) disabled intervals;
        # an end of None means the type is still disabled
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Return False iff warnings of warning_type were disabled at
        the moment given by timestamp (i.e. the warning is "invalid")."""
        for start, end in self.disabled_warnings.get(warning_type, []):
            if start <= timestamp and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        # only open a new interval when the last one has been closed
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        # close the currently-open interval, if there is one
        if intervals and intervals[-1][1] is None:
            start = intervals[-1][0]
            intervals[-1] = (start, int(current_time_func()))