/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

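/*
 * Check that an entry pulled off the ring buffer is one of the
 * entry types these selftests expect to find there.
 */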
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

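/*
 * Consume every event on one CPU's ring buffer and verify that each
 * entry is of a valid type; on corruption, disable tracing entirely.
 */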
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, this will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

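/* Common warning printed when a tracer's init() callback fails */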
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

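/*
 * The probes below are simple counting callbacks. The ops selftest
 * registers them with different filters and then checks the counters
 * to verify that each ftrace_ops traced only the functions it was
 * filtered on.
 */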
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func			= trace_selftest_test_global_func,
	.flags			= FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

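/*
 * Register several ftrace_ops with different filters, call the two
 * test functions, and verify via the counters that each ops was
 * invoked exactly as its filter dictates. Also exercises an ops
 * structure that is allocated dynamically at runtime.
 */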
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
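/*
 * Disable interrupts for a short, known window and check that the
 * irqsoff tracer recorded a max latency trace for it.
 */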
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure, because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
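/*
 * Same idea as the irqsoff test, but with a preemption-disabled
 * window driving the preemptoff tracer.
 */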
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure, because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
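/*
 * Combined test: disable both preemption and interrupts, in both
 * nesting orders, and check the preemptirqsoff tracer each time.
 */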
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure, because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
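/*
 * Kernel thread used as the wakeup target for the wakeup tracer test:
 * it raises itself to an RT priority, sleeps, and waits to be woken
 * up and then stopped.
 */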
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

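/*
 * Create an RT thread and wake it while tracing, then check that the
 * wakeup tracer captured the wakeup latency in the max buffer.
 */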
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that can happen
	 * in such a case is that we disable tracing. Honestly, if
	 * this race does happen, something is horribly wrong with
	 * the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
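/*
 * Let the scheduler switch tasks for a while and check that the
 * context switch tracer recorded something.
 */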
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
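/*
 * Enable the branch tracer for a short period and check that it
 * recorded branch entries.
 */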
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */