/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_needs_tasklist(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

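/*
 * Illustrative sketch added by the editor (not part of the original
 * file): the sig_kernel_* predicates above encode the default-action
 * table from the header comment.  A hypothetical debugging helper
 * could map a signal back to its default action like this:
 */
static inline const char *example_default_action(int sig)
{
	if (sig_kernel_ignore(sig))
		return "ignore";
	if (sig_kernel_stop(sig))
		return "stop";
	if (sig_kernel_coredump(sig))
		return "coredump";
	/* Everything else, including all RT signals, terminates. */
	return "terminate";
}
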
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

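/*
 * Illustrative sketch added by the editor (not part of the original
 * file): every path that changes current->blocked is expected to
 * re-derive TIF_SIGPENDING, e.g. a sigprocmask-style update done
 * under the siglock:
 */
static inline void example_block_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
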
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

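/*
 * Illustrative sketch added by the editor (not part of the original
 * file): the lowest-numbered deliverable signal wins, and the private
 * queue is consulted before the shared one, mirroring what
 * __dequeue_signal() does below.  Caller is assumed to hold
 * tsk->sighand->siglock.
 */
static inline int example_peek_signal(struct task_struct *tsk)
{
	int sig = next_signal(&tsk->pending, &tsk->blocked);

	if (!sig)
		sig = next_signal(&tsk->signal->shared_pending, &tsk->blocked);
	return sig;	/* 0 means nothing is deliverable right now */
}
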
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		tsk->signal = NULL;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	rcu_read_unlock();
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		__cleanup_signal(sig);
	}
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  (See the
 * illustrative sketch after unblock_all_signals() below.) */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

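/*
 * Illustrative sketch added by the editor (hypothetical driver code,
 * not part of the original file): how block_all_signals() and
 * unblock_all_signals() are meant to bracket a critical region.
 * All names below are invented.
 */
static int example_notifier(void *priv)
{
	/* Non-zero: deliver the signal after all; 0: keep it blocked. */
	return *(int *)priv;
}

static void example_critical_region(sigset_t *mask)
{
	int let_through = 0;

	block_all_signals(example_notifier, &let_through, mask);
	/* ... work that must not be disturbed by signals in *mask ... */
	unblock_all_signals();
}
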
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;

	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

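/*
 * Illustrative sketch added by the editor (not part of the original
 * file): the typical calling pattern for dequeue_signal(), taking the
 * siglock as the comment above requires.
 */
static inline int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
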
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	}
	if (sigismember(&t->blocked, sig)) {
		sigdelset(&t->blocked, sig);
	}
	recalc_sigpending_tsk(t);
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

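/*
 * Illustrative sketch added by the editor (not part of the original
 * file): the intended pairing with unlock_task_sighand(), just as
 * group_send_sig_info() below uses it.  A NULL return means the task
 * is already releasing its sighand.
 */
static inline int example_with_task_sighand(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;
	/* ... tsk->sighand and tsk->signal are stable here ... */
	unlock_task_sighand(tsk, &flags);
	return 0;
}
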
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
		      uid_t uid, uid_t euid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

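/*
 * Illustrative sketch added by the editor (not part of the original
 * file): __si_special() picks the siginfo convention from <priv>;
 * a kernel-internal sender passes priv = 1 and the target sees
 * SI_KERNEL, while priv = 0 yields SI_USER semantics with the
 * sender's pid/uid filled in.
 */
static inline int example_kernel_hangup(struct task_struct *p)
{
	/* priv = 1: kernel-internal sender, delivered with SI_KERNEL. */
	return send_sig(SIGHUP, p, 1);
}
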
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;
	struct sighand_struct *sh;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out_err;
	}

retry:
	sh = rcu_dereference(p->sighand);

	spin_lock_irqsave(&sh->siglock, flags);
	if (p->sighand != sh) {
		/* We raced with exec() in a multithreaded process... */
		spin_unlock_irqrestore(&sh->siglock, flags);
		goto retry;
	}

	/*
	 * We do the check here again to handle the following scenario:
	 *
	 * CPU 0		CPU 1
	 * send_sigqueue
	 * check PF_EXITING
	 *			interrupt exit code running
	 *			__exit_signal
	 *			lock sighand->siglock
	 *			unlock sighand->siglock
	 * lock sh->siglock
	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
	 *
	 */

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
	rcu_read_unlock();

	return ret;
}

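/*
 * Illustrative sketch added by the editor (not part of the original
 * file): the preallocated-sigqueue lifecycle the comment above
 * describes, in the style of the POSIX-timer code.  <target> is
 * assumed pinned with get_task_struct(); the entry comes from
 * sigqueue_alloc() at creation time and is released at teardown with
 * sigqueue_free(), which also dequeues it if still pending.
 */
static int example_timer_event(struct sigqueue *q, struct task_struct *target)
{
	q->info.si_signo = SIGRTMIN;
	q->info.si_errno = 0;
	q->info.si_code = SI_TIMER;

	/*
	 * Re-use the same entry on every expiration: no allocation
	 * here, so delivery cannot fail with EAGAIN.  A negative
	 * return means the target is exiting and the event should be
	 * redirected (e.g. to the group leader).
	 */
	return send_sigqueue(SIGRTMIN, q, target);
}
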
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
1499 * the overrun count. Other uses should not try to
1500 * send the signal multiple times.
1501 */
1502 if (q->info.si_code != SI_TIMER)
1503 BUG();
1504 q->info.si_overrun++;
1505 goto out;
1506 }
1507
1508 /*
1509 * Put this signal on the shared-pending queue.
1510 * We always use the shared queue for process-wide signals,
1511 * to avoid several races.
1512 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 list_add_tail(&q->list, &p->signal->shared_pending.list);
1514 sigaddset(&p->signal->shared_pending.signal, sig);
1515
1516 __group_complete_signal(sig, p);
1517out:
1518 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1519 read_unlock(&tasklist_lock);
Ingo Molnare56d0902006-01-08 01:01:37 -08001520 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521}
1522
1523/*
1524 * Wake up any threads in the parent blocked in wait* syscalls.
1525 */
1526static inline void __wake_up_parent(struct task_struct *p,
1527 struct task_struct *parent)
1528{
1529 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1530}
1531
1532/*
1533 * Let a parent know about the death of a child.
1534 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1535 */
1536
1537void do_notify_parent(struct task_struct *tsk, int sig)
1538{
1539 struct siginfo info;
1540 unsigned long flags;
1541 struct sighand_struct *psig;
1542
1543 BUG_ON(sig == -1);
1544
1545 /* do_notify_parent_cldstop should have been called instead. */
1546 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1547
1548 BUG_ON(!tsk->ptrace &&
1549 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1550
1551 info.si_signo = sig;
1552 info.si_errno = 0;
1553 info.si_pid = tsk->pid;
1554 info.si_uid = tsk->uid;
1555
1556 /* FIXME: find out whether or not this is supposed to be c*time. */
1557 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1558 tsk->signal->utime));
1559 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1560 tsk->signal->stime));
1561
1562 info.si_status = tsk->exit_code & 0x7f;
1563 if (tsk->exit_code & 0x80)
1564 info.si_code = CLD_DUMPED;
1565 else if (tsk->exit_code & 0x7f)
1566 info.si_code = CLD_KILLED;
1567 else {
1568 info.si_code = CLD_EXITED;
1569 info.si_status = tsk->exit_code >> 8;
1570 }
1571
1572 psig = tsk->parent->sighand;
1573 spin_lock_irqsave(&psig->siglock, flags);
Oleg Nesterov7ed01752005-11-10 17:22:18 +03001574 if (!tsk->ptrace && sig == SIGCHLD &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1576 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1577 /*
1578 * We are exiting and our parent doesn't care. POSIX.1
1579 * defines special semantics for setting SIGCHLD to SIG_IGN
1580 * or setting the SA_NOCLDWAIT flag: we should be reaped
1581 * automatically and not left for our parent's wait4 call.
1582 * Rather than having the parent do it as a magic kind of
1583 * signal handler, we just set this to tell do_exit that we
1584 * can be cleaned up without becoming a zombie. Note that
1585 * we still call __wake_up_parent in this case, because a
1586 * blocked sys_wait4 might now return -ECHILD.
1587 *
1588 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1589 * is implementation-defined: we do (if you don't want
1590 * it, just use SIG_IGN instead).
1591 */
1592 tsk->exit_signal = -1;
1593 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1594 sig = 0;
1595 }
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001596 if (valid_signal(sig) && sig > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 __group_send_sig_info(sig, &info, tsk->parent);
1598 __wake_up_parent(tsk, tsk->parent);
1599 spin_unlock_irqrestore(&psig->siglock, flags);
1600}
1601
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001602static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603{
1604 struct siginfo info;
1605 unsigned long flags;
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001606 struct task_struct *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 struct sighand_struct *sighand;
1608
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001609 if (to_self)
1610 parent = tsk->parent;
1611 else {
1612 tsk = tsk->group_leader;
1613 parent = tsk->real_parent;
1614 }
1615
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 info.si_signo = SIGCHLD;
1617 info.si_errno = 0;
1618 info.si_pid = tsk->pid;
1619 info.si_uid = tsk->uid;
1620
1621 /* FIXME: find out whether or not this is supposed to be c*time. */
1622 info.si_utime = cputime_to_jiffies(tsk->utime);
1623 info.si_stime = cputime_to_jiffies(tsk->stime);
1624
1625 info.si_code = why;
1626 switch (why) {
1627 case CLD_CONTINUED:
1628 info.si_status = SIGCONT;
1629 break;
1630 case CLD_STOPPED:
1631 info.si_status = tsk->signal->group_exit_code & 0x7f;
1632 break;
1633 case CLD_TRAPPED:
1634 info.si_status = tsk->exit_code & 0x7f;
1635 break;
1636 default:
1637 BUG();
1638 }
1639
1640 sighand = parent->sighand;
1641 spin_lock_irqsave(&sighand->siglock, flags);
1642 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1643 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1644 __group_send_sig_info(SIGCHLD, &info, parent);
1645 /*
1646 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1647 */
1648 __wake_up_parent(tsk, parent);
1649 spin_unlock_irqrestore(&sighand->siglock, flags);
1650}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
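
/*
 * Editor's note: a hedged sketch of the tracer side of the stop above.  The
 * names below are the standard ptrace(2)/waitpid(2) userspace API, not code
 * from this file.  The CLD_TRAPPED notification sent by ptrace_stop() is
 * what lets the debugger's waitpid() observe the stop:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// returns once the child is TASK_TRACED
 *	if (WIFSTOPPED(status))
 *		ptrace(PTRACE_CONT, pid, NULL,
 *		       (void *)(long)WSTOPSIG(status));	// pass signal through
 */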

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
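
/*
 * Editor's note: the BUG_ON above encodes the calling convention: the low
 * seven bits of exit_code must be SIGTRAP, with an optional ptrace event in
 * the next byte.  Assuming the standard PTRACE_EVENT_* constants, a caller
 * reporting a fork event would therefore look like:
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * which the tracer decodes from the waitpid() status via status >> 16.
 */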

static void
finish_stop(int stop_count)
{
	int to_self;

	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
		to_self = 1;
	else if (stop_count == 0)
		to_self = 0;
	else
		goto out;

	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
	read_unlock(&tasklist_lock);

out:
	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
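
/*
 * Editor's note: a hedged userspace illustration (not code from this file)
 * of the group stop implemented above.  One SIGTSTP delivered to any thread
 * stops every thread in the group, and the parent sees a single WUNTRACED
 * report:
 *
 *	kill(pid, SIGTSTP);			// any thread of pid's group
 *	waitpid(pid, &status, WUNTRACED);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTSTP)
 *		kill(pid, SIGCONT);		// wakes all stopped threads
 */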

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump.
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig or group_exit? */
			signr = current->exit_code;
			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing.  */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current == child_reaper)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
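
/*
 * Editor's note: a hedged sketch of how an architecture's signal code is
 * expected to drive the loop above.  The arch-side names vary by platform;
 * this is an illustration, not code from this file:
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		siginfo_t info;
 *		struct k_sigaction ka;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0)
 *			handle_signal(signr, &info, &ka, regs);	// arch helper
 *	}
 */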

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
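
/*
 * Editor's note: a hedged in-kernel usage sketch for the helper above (an
 * illustration, not a call site in this file).  A kernel thread that wants
 * to accept only SIGTERM might do:
 *
 *	sigset_t all, old;
 *
 *	siginitsetinv(&all, sigmask(SIGTERM));	// everything but SIGTERM
 *	sigprocmask(SIG_BLOCK, &all, &old);
 */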

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
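
/*
 * Editor's note: the glibc sigprocmask(2) wrapper ends up here.  A minimal
 * userspace sketch of the syscall above -- note that the kernel silently
 * drops SIGKILL/SIGSTOP from the new mask rather than failing:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now held pending
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore; pending SIGINT delivered
 */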

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we sleep, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
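
/*
 * Editor's note: a minimal userspace sketch of the synchronous-wait model
 * this syscall implements (reached via the glibc sigtimedwait(3) wrapper).
 * The waited-for signals must be blocked first, or they may be delivered
 * to a handler instead of being dequeued here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("from pid %d\n", (int)si.si_pid);
 */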

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process.  This method
 * solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
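
/*
 * Editor's note: a hedged userspace sketch of tgkill, reached through
 * syscall(2) directly in this era (no dedicated libc wrapper assumed).
 * The tid is the value the target thread obtained via SYS_gettid:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */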

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
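
/*
 * Editor's note: userspace normally reaches this through sigqueue(3), whose
 * payload lands in si_value.  The si_code >= 0 rejection above is why the
 * libc wrapper fills in SI_QUEUE (a negative code) itself.  A hedged sketch:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGRTMIN, v);	// receiver reads si->si_value.sival_int
 */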

int
do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
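
/*
 * Editor's note: a hedged userspace illustration of the POSIX 3.3.1.3 rule
 * implemented above.  Even with SIGCHLD blocked and already pending,
 * installing SIG_IGN discards it:
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	kill(getpid(), SIGCHLD);	// now pending (blocked)
 *	signal(SIGCHLD, SIG_IGN);	// pending SIGCHLD is discarded
 *	sigpending(&set);		// SIGCHLD no longer a member
 */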

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
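
/*
 * Editor's note: a hedged userspace sketch of the sigaltstack(2) interface
 * backed by the helper above, typically paired with SA_ONSTACK so a SIGSEGV
 * handler can run even when the normal stack is exhausted (segv_handler is
 * a user-defined function, assumed here):
 *
 *	stack_t ss = { .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags = SA_ONSTACK };
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */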

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
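
/*
 * Editor's note: a hedged sketch of why sigsuspend(2) (which lands here)
 * exists at all -- it swaps the mask and sleeps atomically, closing the
 * race window that a sigprocmask()+pause() pair would leave open
 * (child_exited is an assumed flag set by the SIGCHLD handler):
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);	// atomically unblock and wait
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */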

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}