/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

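/*
 * Host pid of the process running this CPU's userspace.  Only entry 0 is
 * used for now; see the FIXME below about indexing by CPU.
 */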
static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

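/* Map a host pid back to the CPU it is running for, or -1 if none matches. */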
int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

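/*
 * Kernel stacks are plain blocks of 2^order pages from the page allocator;
 * alloc_stack() can be asked to allocate atomically.
 */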
void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

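/* Record the task (and the host pid backing it) now running on this CPU. */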
static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

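/*
 * Context switch: note the outgoing task as prev_sched, update cpu_tasks,
 * and jump to the incoming task's kernel stack via switch_threads().
 * Returns the task we switched away from, for the scheduler's benefit.
 */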
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        do {
                current->thread.saved_task = NULL;

                switch_threads(&from->thread.switch_buf,
                               &to->thread.switch_buf);

                arch_switch_to(current);

                if (current->thread.saved_task)
                        show_regs(&(current->thread.regs));
                to = current->thread.saved_task;
                from = current;
        } while (current->thread.saved_task);

        return current->thread.prev_sched;
}

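/*
 * Finish interrupt/syscall processing: reschedule if needed, deliver
 * pending signals, and handle TIF_NOTIFY_RESUME work.
 */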
void interrupt_end(void)
{
        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal();
        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * The return value is 1 if the kernel thread execs a process,
         * 0 if it just exits
         */
        n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
        if (n == 1)
                userspace(&current->thread.regs.regs);
        else
                do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs);
}

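/*
 * Set up a new task's thread state.  Userspace children get a copy of the
 * parent's registers and restart in fork_handler(); kernel threads
 * (PF_KTHREAD) get a safe register set and start in new_thread_handler(),
 * with sp/arg carrying the thread function and its argument.
 */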
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p,
                struct pt_regs *regs)
{
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!kthread) {
                memcpy(&p->thread.regs.regs, &regs->regs,
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread.proc = (int (*)(void *))sp;
                p->thread.request.u.thread.arg = (void *)arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!kthread) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_copy_tls(p);
        }

        return ret;
}

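/* Run proc(arg) on the initial thread, with kmalloc disabled around the call. */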
void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

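/*
 * Idle loop: reschedule when work appears, otherwise stop the tick and
 * sleep for the interval returned by disable_timer().
 */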
void default_idle(void)
{
        unsigned long long nsecs;

        while (1) {
                /* endless idle loop with no priority at all */

                /*
                 * although we are an idle CPU, we do not want to
                 * get into the scheduler unnecessarily.
                 */
                if (need_resched())
                        schedule();

                tick_nohz_idle_enter();
                rcu_idle_enter();
                nsecs = disable_timer();
                idle_sleep(nsecs);
                rcu_idle_exit();
                tick_nohz_idle_exit();
        }
}

void cpu_idle(void)
{
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        default_idle();
}

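/* Nonzero if the current context must not sleep. */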
int __cant_sleep(void)
{
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

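/*
 * A stack pointer is in user context if it does not lie within the
 * current kernel stack.
 */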
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

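/* Run the UML-specific exitcalls in reverse order. */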
void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

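/* Thin wrappers around the standard uaccess helpers. */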
int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
        return strlen_user(str);
}

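/*
 * On SMP, dispatch the IPI for this CPU; returns nonzero when it was
 * handled on a CPU other than 0.
 */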
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
        int cpu = current_thread_info()->cpu;

        IPI_handler(cpu);
        if (cpu != 0)
                return 1;
#endif
        return 0;
}

int cpu(void)
{
        return current_thread_info()->cpu;
}

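/*
 * /proc/sysemu selects how much of the host's SYSEMU ptrace support is
 * used (0-2).  Writes above the detected level of support are ignored.
 */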
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}

static const struct file_operations sysemu_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = sysemu_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;

        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

        if (ent == NULL) {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

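/*
 * Single-stepping state of a task: 0 if it is not being stepped, 1 if it
 * is stepping through a syscall, 2 otherwise.
 */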
int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif

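/*
 * Find where a sleeping task is waiting: walk its kernel stack upward from
 * the saved stack pointer and return the first kernel text address found
 * above the scheduler functions.
 */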
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
                return 0;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}

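/*
 * Core-dump helper: read the FP registers from the host process backing
 * this CPU's userspace.
 */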
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}