blob: 1d7f4463c32d15c14a2b1f9e366da57a35ab20ac [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18#include <linux/sched.h>
19#include <linux/fs.h>
20#include <linux/tty.h>
21#include <linux/binfmts.h>
22#include <linux/security.h>
23#include <linux/syscalls.h>
24#include <linux/ptrace.h>
25#include <linux/posix-timers.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070026#include <linux/signal.h>
Steve Grubbc2f0c7c2005-05-06 12:38:39 +010027#include <linux/audit.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080028#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <asm/param.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32#include <asm/siginfo.h>
33
34/*
35 * SLAB caches for signal bits.
36 */
37
38static kmem_cache_t *sigqueue_cachep;
39
40/*
41 * In POSIX a signal is sent either to a specific thread (Linux task)
42 * or to the process as a whole (Linux thread group). How the signal
43 * is sent determines whether it's to one thread or the whole group,
44 * which determines which signal mask(s) are involved in blocking it
45 * from being delivered until later. When the signal is delivered,
46 * either it's caught or ignored by a user handler or it has a default
47 * effect that applies to the whole thread group (POSIX process).
48 *
49 * The possible effects an unblocked signal set to SIG_DFL can have are:
50 * ignore - Nothing Happens
51 * terminate - kill the process, i.e. all threads in the group,
52 * similar to exit_group. The group leader (only) reports
53 * WIFSIGNALED status to its parent.
54 * coredump - write a core dump file describing all threads using
55 * the same mm and then kill all those threads
56 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
57 *
58 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
59 * Other signals when not blocked and set to SIG_DFL behaves as follows.
60 * The job control signals also have other special effects.
61 *
62 * +--------------------+------------------+
63 * | POSIX signal | default action |
64 * +--------------------+------------------+
65 * | SIGHUP | terminate |
66 * | SIGINT | terminate |
67 * | SIGQUIT | coredump |
68 * | SIGILL | coredump |
69 * | SIGTRAP | coredump |
70 * | SIGABRT/SIGIOT | coredump |
71 * | SIGBUS | coredump |
72 * | SIGFPE | coredump |
73 * | SIGKILL | terminate(+) |
74 * | SIGUSR1 | terminate |
75 * | SIGSEGV | coredump |
76 * | SIGUSR2 | terminate |
77 * | SIGPIPE | terminate |
78 * | SIGALRM | terminate |
79 * | SIGTERM | terminate |
80 * | SIGCHLD | ignore |
81 * | SIGCONT | ignore(*) |
82 * | SIGSTOP | stop(*)(+) |
83 * | SIGTSTP | stop(*) |
84 * | SIGTTIN | stop(*) |
85 * | SIGTTOU | stop(*) |
86 * | SIGURG | ignore |
87 * | SIGXCPU | coredump |
88 * | SIGXFSZ | coredump |
89 * | SIGVTALRM | terminate |
90 * | SIGPROF | terminate |
91 * | SIGPOLL/SIGIO | terminate |
92 * | SIGSYS/SIGUNUSED | coredump |
93 * | SIGSTKFLT | terminate |
94 * | SIGWINCH | ignore |
95 * | SIGPWR | terminate |
96 * | SIGRTMIN-SIGRTMAX | terminate |
97 * +--------------------+------------------+
98 * | non-POSIX signal | default action |
99 * +--------------------+------------------+
100 * | SIGEMT | coredump |
101 * +--------------------+------------------+
102 *
103 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104 * (*) Special job control effects:
105 * When SIGCONT is sent, it resumes the process (all threads in the group)
106 * from TASK_STOPPED state and also clears any pending/queued stop signals
107 * (any of those marked with "stop(*)"). This happens regardless of blocking,
108 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
109 * any pending/queued SIGCONT signals; this happens regardless of blocking,
110 * catching, or ignored the stop signal, though (except for SIGSTOP) the
111 * default action of stopping the process may happen later or never.
112 */
113
114#ifdef SIGEMT
115#define M_SIGEMT M(SIGEMT)
116#else
117#define M_SIGEMT 0
118#endif
119
120#if SIGRTMIN > BITS_PER_LONG
121#define M(sig) (1ULL << ((sig)-1))
122#else
123#define M(sig) (1UL << ((sig)-1))
124#endif
125#define T(sig, mask) (M(sig) & (mask))
126
127#define SIG_KERNEL_ONLY_MASK (\
128 M(SIGKILL) | M(SIGSTOP) )
129
130#define SIG_KERNEL_STOP_MASK (\
131 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
132
133#define SIG_KERNEL_COREDUMP_MASK (\
134 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
135 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
136 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
137
138#define SIG_KERNEL_IGNORE_MASK (\
139 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
140
141#define sig_kernel_only(sig) \
142 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
143#define sig_kernel_coredump(sig) \
144 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
145#define sig_kernel_ignore(sig) \
146 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
147#define sig_kernel_stop(sig) \
148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
149
Oleg Nesterova9e88e82006-03-28 16:11:14 -0800150#define sig_needs_tasklist(sig) \
151 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
152
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153#define sig_user_defined(t, signr) \
154 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
155 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
156
157#define sig_fatal(t, signr) \
158 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
159 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
160
161static int sig_ignored(struct task_struct *t, int sig)
162{
163 void __user * handler;
164
165 /*
166 * Tracers always want to know about signals..
167 */
168 if (t->ptrace & PT_PTRACED)
169 return 0;
170
171 /*
172 * Blocked signals are never ignored, since the
173 * signal handler may change by the time it is
174 * unblocked.
175 */
176 if (sigismember(&t->blocked, sig))
177 return 0;
178
179 /* Is it explicitly or implicitly ignored? */
180 handler = t->sighand->action[sig-1].sa.sa_handler;
181 return handler == SIG_IGN ||
182 (handler == SIG_DFL && sig_kernel_ignore(sig));
183}
184
185/*
186 * Re-calculate pending state from the set of locally pending
187 * signals, globally pending signals, and blocked signals.
188 */
189static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
190{
191 unsigned long ready;
192 long i;
193
194 switch (_NSIG_WORDS) {
195 default:
196 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
197 ready |= signal->sig[i] &~ blocked->sig[i];
198 break;
199
200 case 4: ready = signal->sig[3] &~ blocked->sig[3];
201 ready |= signal->sig[2] &~ blocked->sig[2];
202 ready |= signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
205
206 case 2: ready = signal->sig[1] &~ blocked->sig[1];
207 ready |= signal->sig[0] &~ blocked->sig[0];
208 break;
209
210 case 1: ready = signal->sig[0] &~ blocked->sig[0];
211 }
212 return ready != 0;
213}
214
215#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
216
217fastcall void recalc_sigpending_tsk(struct task_struct *t)
218{
219 if (t->signal->group_stop_count > 0 ||
Christoph Lameter3e1d1d22005-06-24 23:13:50 -0700220 (freezing(t)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 PENDING(&t->pending, &t->blocked) ||
222 PENDING(&t->signal->shared_pending, &t->blocked))
223 set_tsk_thread_flag(t, TIF_SIGPENDING);
224 else
225 clear_tsk_thread_flag(t, TIF_SIGPENDING);
226}
227
228void recalc_sigpending(void)
229{
230 recalc_sigpending_tsk(current);
231}
232
233/* Given the mask, find the first available signal that should be serviced. */
234
235static int
236next_signal(struct sigpending *pending, sigset_t *mask)
237{
238 unsigned long i, *s, *m, x;
239 int sig = 0;
240
241 s = pending->signal.sig;
242 m = mask->sig;
243 switch (_NSIG_WORDS) {
244 default:
245 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
246 if ((x = *s &~ *m) != 0) {
247 sig = ffz(~x) + i*_NSIG_BPW + 1;
248 break;
249 }
250 break;
251
252 case 2: if ((x = s[0] &~ m[0]) != 0)
253 sig = 1;
254 else if ((x = s[1] &~ m[1]) != 0)
255 sig = _NSIG_BPW + 1;
256 else
257 break;
258 sig += ffz(~x);
259 break;
260
261 case 1: if ((x = *s &~ *m) != 0)
262 sig = ffz(~x) + 1;
263 break;
264 }
265
266 return sig;
267}
268
Al Virodd0fc662005-10-07 07:46:04 +0100269static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 int override_rlimit)
271{
272 struct sigqueue *q = NULL;
273
274 atomic_inc(&t->user->sigpending);
275 if (override_rlimit ||
276 atomic_read(&t->user->sigpending) <=
277 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
278 q = kmem_cache_alloc(sigqueue_cachep, flags);
279 if (unlikely(q == NULL)) {
280 atomic_dec(&t->user->sigpending);
281 } else {
282 INIT_LIST_HEAD(&q->list);
283 q->flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 q->user = get_uid(t->user);
285 }
286 return(q);
287}
288
Andrew Morton514a01b2006-02-03 03:04:41 -0800289static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
291 if (q->flags & SIGQUEUE_PREALLOC)
292 return;
293 atomic_dec(&q->user->sigpending);
294 free_uid(q->user);
295 kmem_cache_free(sigqueue_cachep, q);
296}
297
298static void flush_sigqueue(struct sigpending *queue)
299{
300 struct sigqueue *q;
301
302 sigemptyset(&queue->signal);
303 while (!list_empty(&queue->list)) {
304 q = list_entry(queue->list.next, struct sigqueue , list);
305 list_del_init(&q->list);
306 __sigqueue_free(q);
307 }
308}
309
310/*
311 * Flush all pending signals for a task.
312 */
313
314void
315flush_signals(struct task_struct *t)
316{
317 unsigned long flags;
318
319 spin_lock_irqsave(&t->sighand->siglock, flags);
320 clear_tsk_thread_flag(t,TIF_SIGPENDING);
321 flush_sigqueue(&t->pending);
322 flush_sigqueue(&t->signal->shared_pending);
323 spin_unlock_irqrestore(&t->sighand->siglock, flags);
324}
325
326/*
327 * This function expects the tasklist_lock write-locked.
328 */
329void __exit_sighand(struct task_struct *tsk)
330{
331 struct sighand_struct * sighand = tsk->sighand;
332
333 /* Ok, we're done with the signal handlers */
334 tsk->sighand = NULL;
335 if (atomic_dec_and_test(&sighand->count))
Oleg Nesterovaa1757f2006-03-28 16:11:12 -0800336 kmem_cache_free(sighand_cachep, sighand);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339/*
340 * This function expects the tasklist_lock write-locked.
341 */
342void __exit_signal(struct task_struct *tsk)
343{
344 struct signal_struct * sig = tsk->signal;
Ingo Molnare56d0902006-01-08 01:01:37 -0800345 struct sighand_struct * sighand;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346
347 if (!sig)
348 BUG();
349 if (!atomic_read(&sig->count))
350 BUG();
Ingo Molnare56d0902006-01-08 01:01:37 -0800351 rcu_read_lock();
352 sighand = rcu_dereference(tsk->sighand);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 spin_lock(&sighand->siglock);
354 posix_cpu_timers_exit(tsk);
355 if (atomic_dec_and_test(&sig->count)) {
356 posix_cpu_timers_exit_group(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 tsk->signal = NULL;
Ingo Molnare56d0902006-01-08 01:01:37 -0800358 __exit_sighand(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359 spin_unlock(&sighand->siglock);
360 flush_sigqueue(&sig->shared_pending);
361 } else {
362 /*
363 * If there is any task waiting for the group exit
364 * then notify it:
365 */
366 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
367 wake_up_process(sig->group_exit_task);
368 sig->group_exit_task = NULL;
369 }
370 if (tsk == sig->curr_target)
371 sig->curr_target = next_thread(tsk);
372 tsk->signal = NULL;
373 /*
374 * Accumulate here the counters for all threads but the
375 * group leader as they die, so they can be added into
376 * the process-wide totals when those are taken.
377 * The group leader stays around as a zombie as long
378 * as there are other threads. When it gets reaped,
379 * the exit.c code will add its counts into these totals.
380 * We won't ever get here for the group leader, since it
381 * will have been the last reference on the signal_struct.
382 */
383 sig->utime = cputime_add(sig->utime, tsk->utime);
384 sig->stime = cputime_add(sig->stime, tsk->stime);
385 sig->min_flt += tsk->min_flt;
386 sig->maj_flt += tsk->maj_flt;
387 sig->nvcsw += tsk->nvcsw;
388 sig->nivcsw += tsk->nivcsw;
389 sig->sched_time += tsk->sched_time;
Ingo Molnare56d0902006-01-08 01:01:37 -0800390 __exit_sighand(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391 spin_unlock(&sighand->siglock);
392 sig = NULL; /* Marker for below. */
393 }
Ingo Molnare56d0902006-01-08 01:01:37 -0800394 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
396 flush_sigqueue(&tsk->pending);
397 if (sig) {
398 /*
Roland McGrath25f407f2005-10-21 15:03:29 -0700399 * We are cleaning up the signal_struct here.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401 exit_thread_group_keys(sig);
402 kmem_cache_free(signal_cachep, sig);
403 }
404}
405
406void exit_signal(struct task_struct *tsk)
407{
Oleg Nesterov8d027de2005-10-29 19:37:40 +0400408 atomic_dec(&tsk->signal->live);
409
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 write_lock_irq(&tasklist_lock);
411 __exit_signal(tsk);
412 write_unlock_irq(&tasklist_lock);
413}
414
415/*
416 * Flush all handlers for a task.
417 */
418
419void
420flush_signal_handlers(struct task_struct *t, int force_default)
421{
422 int i;
423 struct k_sigaction *ka = &t->sighand->action[0];
424 for (i = _NSIG ; i != 0 ; i--) {
425 if (force_default || ka->sa.sa_handler != SIG_IGN)
426 ka->sa.sa_handler = SIG_DFL;
427 ka->sa.sa_flags = 0;
428 sigemptyset(&ka->sa.sa_mask);
429 ka++;
430 }
431}
432
433
434/* Notify the system that a driver wants to block all signals for this
435 * process, and wants to be notified if any signals at all were to be
436 * sent/acted upon. If the notifier routine returns non-zero, then the
437 * signal will be acted upon after all. If the notifier routine returns 0,
438 * then then signal will be blocked. Only one block per process is
439 * allowed. priv is a pointer to private data that the notifier routine
440 * can use to determine if the signal should be blocked or not. */
441
442void
443block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
444{
445 unsigned long flags;
446
447 spin_lock_irqsave(&current->sighand->siglock, flags);
448 current->notifier_mask = mask;
449 current->notifier_data = priv;
450 current->notifier = notifier;
451 spin_unlock_irqrestore(&current->sighand->siglock, flags);
452}
453
454/* Notify the system that blocking has ended. */
455
456void
457unblock_all_signals(void)
458{
459 unsigned long flags;
460
461 spin_lock_irqsave(&current->sighand->siglock, flags);
462 current->notifier = NULL;
463 current->notifier_data = NULL;
464 recalc_sigpending();
465 spin_unlock_irqrestore(&current->sighand->siglock, flags);
466}
467
Arjan van de Ven858119e2006-01-14 13:20:43 -0800468static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469{
470 struct sigqueue *q, *first = NULL;
471 int still_pending = 0;
472
473 if (unlikely(!sigismember(&list->signal, sig)))
474 return 0;
475
476 /*
477 * Collect the siginfo appropriate to this signal. Check if
478 * there is another siginfo for the same signal.
479 */
480 list_for_each_entry(q, &list->list, list) {
481 if (q->info.si_signo == sig) {
482 if (first) {
483 still_pending = 1;
484 break;
485 }
486 first = q;
487 }
488 }
489 if (first) {
490 list_del_init(&first->list);
491 copy_siginfo(info, &first->info);
492 __sigqueue_free(first);
493 if (!still_pending)
494 sigdelset(&list->signal, sig);
495 } else {
496
497 /* Ok, it wasn't in the queue. This must be
498 a fast-pathed signal or we must have been
499 out of queue space. So zero out the info.
500 */
501 sigdelset(&list->signal, sig);
502 info->si_signo = sig;
503 info->si_errno = 0;
504 info->si_code = 0;
505 info->si_pid = 0;
506 info->si_uid = 0;
507 }
508 return 1;
509}
510
511static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
512 siginfo_t *info)
513{
514 int sig = 0;
515
Heiko Carstensb17b0422005-11-13 16:07:14 -0800516 sig = next_signal(pending, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517 if (sig) {
518 if (current->notifier) {
519 if (sigismember(current->notifier_mask, sig)) {
520 if (!(current->notifier)(current->notifier_data)) {
521 clear_thread_flag(TIF_SIGPENDING);
522 return 0;
523 }
524 }
525 }
526
527 if (!collect_signal(sig, pending, info))
528 sig = 0;
529
530 }
531 recalc_sigpending();
532
533 return sig;
534}
535
536/*
537 * Dequeue a signal and return the element to the caller, which is
538 * expected to free it.
539 *
540 * All callers have to hold the siglock.
541 */
542int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
543{
544 int signr = __dequeue_signal(&tsk->pending, mask, info);
545 if (!signr)
546 signr = __dequeue_signal(&tsk->signal->shared_pending,
547 mask, info);
548 if (signr && unlikely(sig_kernel_stop(signr))) {
549 /*
550 * Set a marker that we have dequeued a stop signal. Our
551 * caller might release the siglock and then the pending
552 * stop signal it is about to process is no longer in the
553 * pending bitmasks, but must still be cleared by a SIGCONT
554 * (and overruled by a SIGKILL). So those cases clear this
555 * shared flag after we've set it. Note that this flag may
556 * remain set after the signal we return is ignored or
557 * handled. That doesn't matter because its only purpose
558 * is to alert stop-signal processing code when another
559 * processor has come along and cleared the flag.
560 */
Oleg Nesterov788e05a2005-10-07 17:46:19 +0400561 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
562 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 }
564 if ( signr &&
565 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
566 info->si_sys_private){
567 /*
568 * Release the siglock to ensure proper locking order
569 * of timer locks outside of siglocks. Note, we leave
570 * irqs disabled here, since the posix-timers code is
571 * about to disable them again anyway.
572 */
573 spin_unlock(&tsk->sighand->siglock);
574 do_schedule_next_timer(info);
575 spin_lock(&tsk->sighand->siglock);
576 }
577 return signr;
578}
579
580/*
581 * Tell a process that it has a new active signal..
582 *
583 * NOTE! we rely on the previous spin_lock to
584 * lock interrupts for us! We can only be called with
585 * "siglock" held, and the local interrupt must
586 * have been disabled when that got acquired!
587 *
588 * No need to set need_resched since signal event passing
589 * goes through ->blocked
590 */
591void signal_wake_up(struct task_struct *t, int resume)
592{
593 unsigned int mask;
594
595 set_tsk_thread_flag(t, TIF_SIGPENDING);
596
597 /*
598 * For SIGKILL, we want to wake it up in the stopped/traced case.
599 * We don't check t->state here because there is a race with it
600 * executing another processor and just now entering stopped state.
601 * By using wake_up_state, we ensure the process will wake up and
602 * handle its death signal.
603 */
604 mask = TASK_INTERRUPTIBLE;
605 if (resume)
606 mask |= TASK_STOPPED | TASK_TRACED;
607 if (!wake_up_state(t, mask))
608 kick_process(t);
609}
610
611/*
612 * Remove signals in mask from the pending set and queue.
613 * Returns 1 if any signals were found.
614 *
615 * All callers must be holding the siglock.
George Anzinger71fabd5e2006-01-08 01:02:48 -0800616 *
617 * This version takes a sigset mask and looks at all signals,
618 * not just those in the first mask word.
619 */
620static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
621{
622 struct sigqueue *q, *n;
623 sigset_t m;
624
625 sigandsets(&m, mask, &s->signal);
626 if (sigisemptyset(&m))
627 return 0;
628
629 signandsets(&s->signal, &s->signal, mask);
630 list_for_each_entry_safe(q, n, &s->list, list) {
631 if (sigismember(mask, q->info.si_signo)) {
632 list_del_init(&q->list);
633 __sigqueue_free(q);
634 }
635 }
636 return 1;
637}
638/*
639 * Remove signals in mask from the pending set and queue.
640 * Returns 1 if any signals were found.
641 *
642 * All callers must be holding the siglock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 */
644static int rm_from_queue(unsigned long mask, struct sigpending *s)
645{
646 struct sigqueue *q, *n;
647
648 if (!sigtestsetmask(&s->signal, mask))
649 return 0;
650
651 sigdelsetmask(&s->signal, mask);
652 list_for_each_entry_safe(q, n, &s->list, list) {
653 if (q->info.si_signo < SIGRTMIN &&
654 (mask & sigmask(q->info.si_signo))) {
655 list_del_init(&q->list);
656 __sigqueue_free(q);
657 }
658 }
659 return 1;
660}
661
662/*
663 * Bad permissions for sending the signal
664 */
665static int check_kill_permission(int sig, struct siginfo *info,
666 struct task_struct *t)
667{
668 int error = -EINVAL;
Jesper Juhl7ed20e12005-05-01 08:59:14 -0700669 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 return error;
671 error = -EPERM;
Oleg Nesterov621d3122005-10-30 15:03:45 -0800672 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 && ((sig != SIGCONT) ||
674 (current->signal->session != t->signal->session))
675 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
676 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
677 && !capable(CAP_KILL))
678 return error;
Steve Grubbc2f0c7c2005-05-06 12:38:39 +0100679
680 error = security_task_kill(t, info, sig);
681 if (!error)
682 audit_signal_info(sig, t); /* Let audit system see the signal */
683 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684}
685
686/* forward decl */
687static void do_notify_parent_cldstop(struct task_struct *tsk,
Oleg Nesterovbc505a42005-09-06 15:17:32 -0700688 int to_self,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 int why);
690
691/*
692 * Handle magic process-wide effects of stop/continue signals.
693 * Unlike the signal actions, these happen immediately at signal-generation
694 * time regardless of blocking, ignoring, or handling. This does the
695 * actual continuing for SIGCONT, but not the actual stopping for stop
696 * signals. The process stop is done as a signal action for SIG_DFL.
697 */
698static void handle_stop_signal(int sig, struct task_struct *p)
699{
700 struct task_struct *t;
701
Bhavesh P. Davdadd12f482005-08-17 12:26:33 -0600702 if (p->signal->flags & SIGNAL_GROUP_EXIT)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 /*
704 * The process is in the middle of dying already.
705 */
706 return;
707
708 if (sig_kernel_stop(sig)) {
709 /*
710 * This is a stop signal. Remove SIGCONT from all queues.
711 */
712 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
713 t = p;
714 do {
715 rm_from_queue(sigmask(SIGCONT), &t->pending);
716 t = next_thread(t);
717 } while (t != p);
718 } else if (sig == SIGCONT) {
719 /*
720 * Remove all stop signals from all queues,
721 * and wake all threads.
722 */
723 if (unlikely(p->signal->group_stop_count > 0)) {
724 /*
725 * There was a group stop in progress. We'll
726 * pretend it finished before we got here. We are
727 * obliged to report it to the parent: if the
728 * SIGSTOP happened "after" this SIGCONT, then it
729 * would have cleared this pending SIGCONT. If it
730 * happened "before" this SIGCONT, then the parent
731 * got the SIGCHLD about the stop finishing before
732 * the continue happened. We do the notification
733 * now, and it's as if the stop had finished and
734 * the SIGCHLD was pending on entry to this kill.
735 */
736 p->signal->group_stop_count = 0;
737 p->signal->flags = SIGNAL_STOP_CONTINUED;
738 spin_unlock(&p->sighand->siglock);
Oleg Nesterovbc505a42005-09-06 15:17:32 -0700739 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740 spin_lock(&p->sighand->siglock);
741 }
742 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
743 t = p;
744 do {
745 unsigned int state;
746 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
747
748 /*
749 * If there is a handler for SIGCONT, we must make
750 * sure that no thread returns to user mode before
751 * we post the signal, in case it was the only
752 * thread eligible to run the signal handler--then
753 * it must not do anything between resuming and
754 * running the handler. With the TIF_SIGPENDING
755 * flag set, the thread will pause and acquire the
756 * siglock that we hold now and until we've queued
757 * the pending signal.
758 *
759 * Wake up the stopped thread _after_ setting
760 * TIF_SIGPENDING
761 */
762 state = TASK_STOPPED;
763 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
764 set_tsk_thread_flag(t, TIF_SIGPENDING);
765 state |= TASK_INTERRUPTIBLE;
766 }
767 wake_up_state(t, state);
768
769 t = next_thread(t);
770 } while (t != p);
771
772 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
773 /*
774 * We were in fact stopped, and are now continued.
775 * Notify the parent with CLD_CONTINUED.
776 */
777 p->signal->flags = SIGNAL_STOP_CONTINUED;
778 p->signal->group_exit_code = 0;
779 spin_unlock(&p->sighand->siglock);
Oleg Nesterovbc505a42005-09-06 15:17:32 -0700780 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 spin_lock(&p->sighand->siglock);
782 } else {
783 /*
784 * We are not stopped, but there could be a stop
785 * signal in the middle of being processed after
786 * being removed from the queue. Clear that too.
787 */
788 p->signal->flags = 0;
789 }
790 } else if (sig == SIGKILL) {
791 /*
792 * Make sure that any pending stop signal already dequeued
793 * is undone by the wakeup for SIGKILL.
794 */
795 p->signal->flags = 0;
796 }
797}
798
799static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
800 struct sigpending *signals)
801{
802 struct sigqueue * q = NULL;
803 int ret = 0;
804
805 /*
806 * fast-pathed signals for kernel-internal things like SIGSTOP
807 * or SIGKILL.
808 */
Oleg Nesterovb67a1b92005-10-30 15:03:44 -0800809 if (info == SEND_SIG_FORCED)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 goto out_set;
811
812 /* Real-time signals must be queued if sent by sigqueue, or
813 some other real-time mechanism. It is implementation
814 defined whether kill() does so. We attempt to do so, on
815 the principle of least surprise, but since kill is not
816 allowed to fail with EAGAIN when low on memory we just
817 make sure at least one signal gets delivered and don't
818 pass on the info struct. */
819
820 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
Oleg Nesterov621d3122005-10-30 15:03:45 -0800821 (is_si_special(info) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 info->si_code >= 0)));
823 if (q) {
824 list_add_tail(&q->list, &signals->list);
825 switch ((unsigned long) info) {
Oleg Nesterovb67a1b92005-10-30 15:03:44 -0800826 case (unsigned long) SEND_SIG_NOINFO:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 q->info.si_signo = sig;
828 q->info.si_errno = 0;
829 q->info.si_code = SI_USER;
830 q->info.si_pid = current->pid;
831 q->info.si_uid = current->uid;
832 break;
Oleg Nesterovb67a1b92005-10-30 15:03:44 -0800833 case (unsigned long) SEND_SIG_PRIV:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834 q->info.si_signo = sig;
835 q->info.si_errno = 0;
836 q->info.si_code = SI_KERNEL;
837 q->info.si_pid = 0;
838 q->info.si_uid = 0;
839 break;
840 default:
841 copy_siginfo(&q->info, info);
842 break;
843 }
Oleg Nesterov621d3122005-10-30 15:03:45 -0800844 } else if (!is_si_special(info)) {
845 if (sig >= SIGRTMIN && info->si_code != SI_USER)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 /*
847 * Queue overflow, abort. We may abort if the signal was rt
848 * and sent by user using something other than kill().
849 */
850 return -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 }
852
853out_set:
854 sigaddset(&signals->signal, sig);
855 return ret;
856}
857
858#define LEGACY_QUEUE(sigptr, sig) \
859 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
860
861
862static int
863specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
864{
865 int ret = 0;
866
867 if (!irqs_disabled())
868 BUG();
869 assert_spin_locked(&t->sighand->siglock);
870
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 /* Short-circuit ignored signals. */
872 if (sig_ignored(t, sig))
873 goto out;
874
875 /* Support queueing exactly one non-rt signal, so that we
876 can get more detailed information about the cause of
877 the signal. */
878 if (LEGACY_QUEUE(&t->pending, sig))
879 goto out;
880
881 ret = send_signal(sig, info, t, &t->pending);
882 if (!ret && !sigismember(&t->blocked, sig))
883 signal_wake_up(t, sig == SIGKILL);
884out:
885 return ret;
886}
887
888/*
889 * Force a signal that the process can't ignore: if necessary
890 * we unblock the signal and change any SIG_IGN to SIG_DFL.
891 */
892
893int
894force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
895{
896 unsigned long int flags;
897 int ret;
898
899 spin_lock_irqsave(&t->sighand->siglock, flags);
Paul E. McKenneyb0423a02005-10-30 15:03:46 -0800900 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 }
Paul E. McKenneyb0423a02005-10-30 15:03:46 -0800903 if (sigismember(&t->blocked, sig)) {
904 sigdelset(&t->blocked, sig);
905 }
906 recalc_sigpending_tsk(t);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 ret = specific_send_sig_info(sig, info, t);
908 spin_unlock_irqrestore(&t->sighand->siglock, flags);
909
910 return ret;
911}
912
913void
914force_sig_specific(int sig, struct task_struct *t)
915{
Paul E. McKenneyb0423a02005-10-30 15:03:46 -0800916 force_sig_info(sig, SEND_SIG_FORCED, t);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917}
918
919/*
920 * Test if P wants to take SIG. After we've checked all threads with this,
921 * it's equivalent to finding no threads not blocking SIG. Any threads not
922 * blocking SIG were ruled out because they are not running and already
923 * have pending signals. Such threads will dequeue from the shared queue
924 * as soon as they're available, so putting the signal on the shared queue
925 * will be equivalent to sending it to one such thread.
926 */
Linus Torvalds188a1eaf2005-09-23 13:22:21 -0700927static inline int wants_signal(int sig, struct task_struct *p)
928{
929 if (sigismember(&p->blocked, sig))
930 return 0;
931 if (p->flags & PF_EXITING)
932 return 0;
933 if (sig == SIGKILL)
934 return 1;
935 if (p->state & (TASK_STOPPED | TASK_TRACED))
936 return 0;
937 return task_curr(p) || !signal_pending(p);
938}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939
940static void
941__group_complete_signal(int sig, struct task_struct *p)
942{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 struct task_struct *t;
944
945 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 * Now find a thread we can wake up to take the signal off the queue.
947 *
948 * If the main thread wants the signal, it gets first crack.
949 * Probably the least surprising to the average bear.
950 */
Linus Torvalds188a1eaf2005-09-23 13:22:21 -0700951 if (wants_signal(sig, p))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 t = p;
953 else if (thread_group_empty(p))
954 /*
955 * There is just one thread and it does not need to be woken.
956 * It will dequeue unblocked signals before it runs again.
957 */
958 return;
959 else {
960 /*
961 * Otherwise try to find a suitable thread.
962 */
963 t = p->signal->curr_target;
964 if (t == NULL)
965 /* restart balancing at this thread */
966 t = p->signal->curr_target = p;
967 BUG_ON(t->tgid != p->tgid);
968
Linus Torvalds188a1eaf2005-09-23 13:22:21 -0700969 while (!wants_signal(sig, t)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 t = next_thread(t);
971 if (t == p->signal->curr_target)
972 /*
973 * No thread needs to be woken.
974 * Any eligible threads will see
975 * the signal in the queue soon.
976 */
977 return;
978 }
979 p->signal->curr_target = t;
980 }
981
982 /*
983 * Found a killable thread. If the signal will be fatal,
984 * then start taking the whole group down immediately.
985 */
986 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
987 !sigismember(&t->real_blocked, sig) &&
988 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
989 /*
990 * This signal will be fatal to the whole group.
991 */
992 if (!sig_kernel_coredump(sig)) {
993 /*
994 * Start a group exit and wake everybody up.
995 * This way we don't have other threads
996 * running and doing things after a slower
997 * thread has the fatal signal pending.
998 */
999 p->signal->flags = SIGNAL_GROUP_EXIT;
1000 p->signal->group_exit_code = sig;
1001 p->signal->group_stop_count = 0;
1002 t = p;
1003 do {
1004 sigaddset(&t->pending.signal, SIGKILL);
1005 signal_wake_up(t, 1);
1006 t = next_thread(t);
1007 } while (t != p);
1008 return;
1009 }
1010
1011 /*
1012 * There will be a core dump. We make all threads other
1013 * than the chosen one go into a group stop so that nothing
1014 * happens until it gets scheduled, takes the signal off
1015 * the shared queue, and does the core dump. This is a
1016 * little more complicated than strictly necessary, but it
1017 * keeps the signal state that winds up in the core dump
1018 * unchanged from the death state, e.g. which thread had
1019 * the core-dump signal unblocked.
1020 */
1021 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1022 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1023 p->signal->group_stop_count = 0;
1024 p->signal->group_exit_task = t;
1025 t = p;
1026 do {
1027 p->signal->group_stop_count++;
1028 signal_wake_up(t, 0);
1029 t = next_thread(t);
1030 } while (t != p);
1031 wake_up_process(p->signal->group_exit_task);
1032 return;
1033 }
1034
1035 /*
1036 * The signal is already in the shared-pending queue.
1037 * Tell the chosen thread to wake up and dequeue it.
1038 */
1039 signal_wake_up(t, sig == SIGKILL);
1040 return;
1041}
1042
1043int
1044__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1045{
1046 int ret = 0;
1047
1048 assert_spin_locked(&p->sighand->siglock);
1049 handle_stop_signal(sig, p);
1050
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 /* Short-circuit ignored signals. */
1052 if (sig_ignored(p, sig))
1053 return ret;
1054
1055 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1056 /* This is a non-RT signal and we already have one queued. */
1057 return ret;
1058
1059 /*
1060 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1061 * We always use the shared queue for process-wide signals,
1062 * to avoid several races.
1063 */
1064 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1065 if (unlikely(ret))
1066 return ret;
1067
1068 __group_complete_signal(sig, p);
1069 return 0;
1070}
1071
1072/*
1073 * Nuke all other threads in the group.
1074 */
1075void zap_other_threads(struct task_struct *p)
1076{
1077 struct task_struct *t;
1078
1079 p->signal->flags = SIGNAL_GROUP_EXIT;
1080 p->signal->group_stop_count = 0;
1081
1082 if (thread_group_empty(p))
1083 return;
1084
1085 for (t = next_thread(p); t != p; t = next_thread(t)) {
1086 /*
1087 * Don't bother with already dead threads
1088 */
1089 if (t->exit_state)
1090 continue;
1091
1092 /*
1093 * We don't want to notify the parent, since we are
1094 * killed as part of a thread group due to another
1095 * thread doing an execve() or similar. So set the
1096 * exit signal to -1 to allow immediate reaping of
1097 * the process. But don't detach the thread group
1098 * leader.
1099 */
1100 if (t != p->group_leader)
1101 t->exit_signal = -1;
1102
Andrea Arcangeli30e0fca62005-10-30 15:02:38 -08001103 /* SIGKILL will be handled before any pending SIGSTOP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 sigaddset(&t->pending.signal, SIGKILL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 signal_wake_up(t, 1);
1106 }
1107}
1108
1109/*
Ingo Molnare56d0902006-01-08 01:01:37 -08001110 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 */
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001112struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1113{
1114 struct sighand_struct *sighand;
1115
1116 for (;;) {
1117 sighand = rcu_dereference(tsk->sighand);
1118 if (unlikely(sighand == NULL))
1119 break;
1120
1121 spin_lock_irqsave(&sighand->siglock, *flags);
1122 if (likely(sighand == tsk->sighand))
1123 break;
1124 spin_unlock_irqrestore(&sighand->siglock, *flags);
1125 }
1126
1127 return sighand;
1128}
1129
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1131{
1132 unsigned long flags;
1133 int ret;
1134
1135 ret = check_kill_permission(sig, info, p);
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001136
1137 if (!ret && sig) {
1138 ret = -ESRCH;
1139 if (lock_task_sighand(p, &flags)) {
1140 ret = __group_send_sig_info(sig, info, p);
1141 unlock_task_sighand(p, &flags);
Ingo Molnare56d0902006-01-08 01:01:37 -08001142 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 }
1144
1145 return ret;
1146}
1147
1148/*
1149 * kill_pg_info() sends a signal to a process group: this is what the tty
1150 * control characters do (^C, ^Z etc)
1151 */
1152
1153int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1154{
1155 struct task_struct *p = NULL;
1156 int retval, success;
1157
1158 if (pgrp <= 0)
1159 return -EINVAL;
1160
1161 success = 0;
1162 retval = -ESRCH;
1163 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1164 int err = group_send_sig_info(sig, info, p);
1165 success |= !err;
1166 retval = err;
1167 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1168 return success ? 0 : retval;
1169}
1170
1171int
1172kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1173{
1174 int retval;
1175
1176 read_lock(&tasklist_lock);
1177 retval = __kill_pg_info(sig, info, pgrp);
1178 read_unlock(&tasklist_lock);
1179
1180 return retval;
1181}
1182
1183int
1184kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1185{
1186 int error;
Ingo Molnare56d0902006-01-08 01:01:37 -08001187 int acquired_tasklist_lock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 struct task_struct *p;
1189
Ingo Molnare56d0902006-01-08 01:01:37 -08001190 rcu_read_lock();
Oleg Nesterova9e88e82006-03-28 16:11:14 -08001191 if (unlikely(sig_needs_tasklist(sig))) {
Ingo Molnare56d0902006-01-08 01:01:37 -08001192 read_lock(&tasklist_lock);
1193 acquired_tasklist_lock = 1;
1194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 p = find_task_by_pid(pid);
1196 error = -ESRCH;
1197 if (p)
1198 error = group_send_sig_info(sig, info, p);
Ingo Molnare56d0902006-01-08 01:01:37 -08001199 if (unlikely(acquired_tasklist_lock))
1200 read_unlock(&tasklist_lock);
1201 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 return error;
1203}
1204
Harald Welte46113832005-10-10 19:44:29 +02001205/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1206int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1207 uid_t uid, uid_t euid)
1208{
1209 int ret = -EINVAL;
1210 struct task_struct *p;
1211
1212 if (!valid_signal(sig))
1213 return ret;
1214
1215 read_lock(&tasklist_lock);
1216 p = find_task_by_pid(pid);
1217 if (!p) {
1218 ret = -ESRCH;
1219 goto out_unlock;
1220 }
Oleg Nesterov0811af22006-01-08 01:03:09 -08001221 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
Harald Welte46113832005-10-10 19:44:29 +02001222 && (euid != p->suid) && (euid != p->uid)
1223 && (uid != p->suid) && (uid != p->uid)) {
1224 ret = -EPERM;
1225 goto out_unlock;
1226 }
1227 if (sig && p->sighand) {
1228 unsigned long flags;
1229 spin_lock_irqsave(&p->sighand->siglock, flags);
1230 ret = __group_send_sig_info(sig, info, p);
1231 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1232 }
1233out_unlock:
1234 read_unlock(&tasklist_lock);
1235 return ret;
1236}
1237EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
1239/*
1240 * kill_something_info() interprets pid in interesting ways just like kill(2).
1241 *
1242 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1243 * is probably wrong. Should make it like BSD or SYSV.
1244 */
1245
1246static int kill_something_info(int sig, struct siginfo *info, int pid)
1247{
1248 if (!pid) {
1249 return kill_pg_info(sig, info, process_group(current));
1250 } else if (pid == -1) {
1251 int retval = 0, count = 0;
1252 struct task_struct * p;
1253
1254 read_lock(&tasklist_lock);
1255 for_each_process(p) {
1256 if (p->pid > 1 && p->tgid != current->tgid) {
1257 int err = group_send_sig_info(sig, info, p);
1258 ++count;
1259 if (err != -EPERM)
1260 retval = err;
1261 }
1262 }
1263 read_unlock(&tasklist_lock);
1264 return count ? retval : -ESRCH;
1265 } else if (pid < 0) {
1266 return kill_pg_info(sig, info, -pid);
1267 } else {
1268 return kill_proc_info(sig, info, pid);
1269 }
1270}
1271
1272/*
1273 * These are for backward compatibility with the rest of the kernel source.
1274 */
1275
1276/*
1277 * These two are the most common entry points. They send a signal
1278 * just to the specific thread.
1279 */
1280int
1281send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1282{
1283 int ret;
1284 unsigned long flags;
1285
1286 /*
1287 * Make sure legacy kernel users don't send in bad values
1288 * (normal paths check this in check_kill_permission).
1289 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001290 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 return -EINVAL;
1292
1293 /*
1294 * We need the tasklist lock even for the specific
1295 * thread case (when we don't need to follow the group
1296 * lists) in order to avoid races with "p->sighand"
1297 * going away or changing from under us.
1298 */
1299 read_lock(&tasklist_lock);
1300 spin_lock_irqsave(&p->sighand->siglock, flags);
1301 ret = specific_send_sig_info(sig, info, p);
1302 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1303 read_unlock(&tasklist_lock);
1304 return ret;
1305}
1306
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001307#define __si_special(priv) \
1308 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1309
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310int
1311send_sig(int sig, struct task_struct *p, int priv)
1312{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001313 return send_sig_info(sig, __si_special(priv), p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314}
1315
1316/*
1317 * This is the entry point for "process-wide" signals.
1318 * They will go to an appropriate thread in the thread group.
1319 */
1320int
1321send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1322{
1323 int ret;
1324 read_lock(&tasklist_lock);
1325 ret = group_send_sig_info(sig, info, p);
1326 read_unlock(&tasklist_lock);
1327 return ret;
1328}
1329
1330void
1331force_sig(int sig, struct task_struct *p)
1332{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001333 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334}
1335
1336/*
1337 * When things go south during signal handling, we
1338 * will force a SIGSEGV. And if the signal that caused
1339 * the problem was already a SIGSEGV, we'll want to
1340 * make sure we don't even try to deliver the signal..
1341 */
1342int
1343force_sigsegv(int sig, struct task_struct *p)
1344{
1345 if (sig == SIGSEGV) {
1346 unsigned long flags;
1347 spin_lock_irqsave(&p->sighand->siglock, flags);
1348 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1349 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1350 }
1351 force_sig(SIGSEGV, p);
1352 return 0;
1353}
1354
1355int
1356kill_pg(pid_t pgrp, int sig, int priv)
1357{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001358 return kill_pg_info(sig, __si_special(priv), pgrp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359}
1360
1361int
1362kill_proc(pid_t pid, int sig, int priv)
1363{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001364 return kill_proc_info(sig, __si_special(priv), pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365}
1366
1367/*
1368 * These functions support sending signals using preallocated sigqueue
1369 * structures. This is needed "because realtime applications cannot
1370 * afford to lose notifications of asynchronous events, like timer
1371 * expirations or I/O completions". In the case of Posix Timers
1372 * we allocate the sigqueue structure from the timer_create. If this
1373 * allocation fails we are able to report the failure to the application
1374 * with an EAGAIN error.
1375 */
1376
1377struct sigqueue *sigqueue_alloc(void)
1378{
1379 struct sigqueue *q;
1380
1381 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1382 q->flags |= SIGQUEUE_PREALLOC;
1383 return(q);
1384}
1385
1386void sigqueue_free(struct sigqueue *q)
1387{
1388 unsigned long flags;
1389 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1390 /*
1391 * If the signal is still pending remove it from the
1392 * pending queue.
1393 */
1394 if (unlikely(!list_empty(&q->list))) {
Oleg Nesterov19a4fcb2005-10-30 15:02:17 -08001395 spinlock_t *lock = &current->sighand->siglock;
1396 read_lock(&tasklist_lock);
1397 spin_lock_irqsave(lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 if (!list_empty(&q->list))
1399 list_del_init(&q->list);
Oleg Nesterov19a4fcb2005-10-30 15:02:17 -08001400 spin_unlock_irqrestore(lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 read_unlock(&tasklist_lock);
1402 }
1403 q->flags &= ~SIGQUEUE_PREALLOC;
1404 __sigqueue_free(q);
1405}
1406
1407int
1408send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1409{
1410 unsigned long flags;
1411 int ret = 0;
Ingo Molnare56d0902006-01-08 01:01:37 -08001412 struct sighand_struct *sh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
Ingo Molnare56d0902006-01-08 01:01:37 -08001415
1416 /*
1417 * The rcu based delayed sighand destroy makes it possible to
1418 * run this without tasklist lock held. The task struct itself
1419 * cannot go away as create_timer did get_task_struct().
1420 *
1421 * We return -1, when the task is marked exiting, so
1422 * posix_timer_event can redirect it to the group leader
1423 */
1424 rcu_read_lock();
Oleg Nesterove752dd62005-09-06 15:17:42 -07001425
1426 if (unlikely(p->flags & PF_EXITING)) {
1427 ret = -1;
1428 goto out_err;
1429 }
1430
Ingo Molnare56d0902006-01-08 01:01:37 -08001431retry:
1432 sh = rcu_dereference(p->sighand);
1433
1434 spin_lock_irqsave(&sh->siglock, flags);
1435 if (p->sighand != sh) {
1436 /* We raced with exec() in a multithreaded process... */
1437 spin_unlock_irqrestore(&sh->siglock, flags);
1438 goto retry;
1439 }
1440
1441 /*
1442 * We do the check here again to handle the following scenario:
1443 *
1444 * CPU 0 CPU 1
1445 * send_sigqueue
1446 * check PF_EXITING
1447 * interrupt exit code running
1448 * __exit_signal
1449 * lock sighand->siglock
1450 * unlock sighand->siglock
1451 * lock sh->siglock
1452 * add(tsk->pending) flush_sigqueue(tsk->pending)
1453 *
1454 */
1455
1456 if (unlikely(p->flags & PF_EXITING)) {
1457 ret = -1;
1458 goto out;
1459 }
Oleg Nesterove752dd62005-09-06 15:17:42 -07001460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 if (unlikely(!list_empty(&q->list))) {
1462 /*
1463 * If an SI_TIMER entry is already queue just increment
1464 * the overrun count.
1465 */
1466 if (q->info.si_code != SI_TIMER)
1467 BUG();
1468 q->info.si_overrun++;
1469 goto out;
Oleg Nesterove752dd62005-09-06 15:17:42 -07001470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 /* Short-circuit ignored signals. */
1472 if (sig_ignored(p, sig)) {
1473 ret = 1;
1474 goto out;
1475 }
1476
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 list_add_tail(&q->list, &p->pending.list);
1478 sigaddset(&p->pending.signal, sig);
1479 if (!sigismember(&p->blocked, sig))
1480 signal_wake_up(p, sig == SIGKILL);
1481
1482out:
Ingo Molnare56d0902006-01-08 01:01:37 -08001483 spin_unlock_irqrestore(&sh->siglock, flags);
Oleg Nesterove752dd62005-09-06 15:17:42 -07001484out_err:
Ingo Molnare56d0902006-01-08 01:01:37 -08001485 rcu_read_unlock();
Oleg Nesterove752dd62005-09-06 15:17:42 -07001486
1487 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488}
1489
1490int
1491send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1492{
1493 unsigned long flags;
1494 int ret = 0;
1495
1496 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
Ingo Molnare56d0902006-01-08 01:01:37 -08001497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 read_lock(&tasklist_lock);
Ingo Molnare56d0902006-01-08 01:01:37 -08001499 /* Since it_lock is held, p->sighand cannot be NULL. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 spin_lock_irqsave(&p->sighand->siglock, flags);
1501 handle_stop_signal(sig, p);
1502
1503 /* Short-circuit ignored signals. */
1504 if (sig_ignored(p, sig)) {
1505 ret = 1;
1506 goto out;
1507 }
1508
1509 if (unlikely(!list_empty(&q->list))) {
1510 /*
1511 * If an SI_TIMER entry is already queue just increment
1512 * the overrun count. Other uses should not try to
1513 * send the signal multiple times.
1514 */
1515 if (q->info.si_code != SI_TIMER)
1516 BUG();
1517 q->info.si_overrun++;
1518 goto out;
1519 }
1520
1521 /*
1522 * Put this signal on the shared-pending queue.
1523 * We always use the shared queue for process-wide signals,
1524 * to avoid several races.
1525 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 list_add_tail(&q->list, &p->signal->shared_pending.list);
1527 sigaddset(&p->signal->shared_pending.signal, sig);
1528
1529 __group_complete_signal(sig, p);
1530out:
1531 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1532 read_unlock(&tasklist_lock);
Ingo Molnare56d0902006-01-08 01:01:37 -08001533 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534}
1535
1536/*
1537 * Wake up any threads in the parent blocked in wait* syscalls.
1538 */
1539static inline void __wake_up_parent(struct task_struct *p,
1540 struct task_struct *parent)
1541{
1542 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1543}
1544
1545/*
1546 * Let a parent know about the death of a child.
1547 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1548 */
1549
1550void do_notify_parent(struct task_struct *tsk, int sig)
1551{
1552 struct siginfo info;
1553 unsigned long flags;
1554 struct sighand_struct *psig;
1555
1556 BUG_ON(sig == -1);
1557
1558 /* do_notify_parent_cldstop should have been called instead. */
1559 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1560
1561 BUG_ON(!tsk->ptrace &&
1562 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1563
1564 info.si_signo = sig;
1565 info.si_errno = 0;
1566 info.si_pid = tsk->pid;
1567 info.si_uid = tsk->uid;
1568
1569 /* FIXME: find out whether or not this is supposed to be c*time. */
1570 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1571 tsk->signal->utime));
1572 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1573 tsk->signal->stime));
1574
1575 info.si_status = tsk->exit_code & 0x7f;
1576 if (tsk->exit_code & 0x80)
1577 info.si_code = CLD_DUMPED;
1578 else if (tsk->exit_code & 0x7f)
1579 info.si_code = CLD_KILLED;
1580 else {
1581 info.si_code = CLD_EXITED;
1582 info.si_status = tsk->exit_code >> 8;
1583 }
1584
1585 psig = tsk->parent->sighand;
1586 spin_lock_irqsave(&psig->siglock, flags);
Oleg Nesterov7ed01752005-11-10 17:22:18 +03001587 if (!tsk->ptrace && sig == SIGCHLD &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1589 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1590 /*
1591 * We are exiting and our parent doesn't care. POSIX.1
1592 * defines special semantics for setting SIGCHLD to SIG_IGN
1593 * or setting the SA_NOCLDWAIT flag: we should be reaped
1594 * automatically and not left for our parent's wait4 call.
1595 * Rather than having the parent do it as a magic kind of
1596 * signal handler, we just set this to tell do_exit that we
1597 * can be cleaned up without becoming a zombie. Note that
1598 * we still call __wake_up_parent in this case, because a
1599 * blocked sys_wait4 might now return -ECHILD.
1600 *
1601 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1602 * is implementation-defined: we do (if you don't want
1603 * it, just use SIG_IGN instead).
1604 */
1605 tsk->exit_signal = -1;
1606 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1607 sig = 0;
1608 }
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001609 if (valid_signal(sig) && sig > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 __group_send_sig_info(sig, &info, tsk->parent);
1611 __wake_up_parent(tsk, tsk->parent);
1612 spin_unlock_irqrestore(&psig->siglock, flags);
1613}
1614
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001615static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616{
1617 struct siginfo info;
1618 unsigned long flags;
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001619 struct task_struct *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 struct sighand_struct *sighand;
1621
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001622 if (to_self)
1623 parent = tsk->parent;
1624 else {
1625 tsk = tsk->group_leader;
1626 parent = tsk->real_parent;
1627 }
1628
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 info.si_signo = SIGCHLD;
1630 info.si_errno = 0;
1631 info.si_pid = tsk->pid;
1632 info.si_uid = tsk->uid;
1633
1634 /* FIXME: find out whether or not this is supposed to be c*time. */
1635 info.si_utime = cputime_to_jiffies(tsk->utime);
1636 info.si_stime = cputime_to_jiffies(tsk->stime);
1637
1638 info.si_code = why;
1639 switch (why) {
1640 case CLD_CONTINUED:
1641 info.si_status = SIGCONT;
1642 break;
1643 case CLD_STOPPED:
1644 info.si_status = tsk->signal->group_exit_code & 0x7f;
1645 break;
1646 case CLD_TRAPPED:
1647 info.si_status = tsk->exit_code & 0x7f;
1648 break;
1649 default:
1650 BUG();
1651 }
1652
1653 sighand = parent->sighand;
1654 spin_lock_irqsave(&sighand->siglock, flags);
1655 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1656 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1657 __group_send_sig_info(SIGCHLD, &info, parent);
1658 /*
1659 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1660 */
1661 __wake_up_parent(tsk, parent);
1662 spin_unlock_irqrestore(&sighand->siglock, flags);
1663}
1664
1665/*
1666 * This must be called with current->sighand->siglock held.
1667 *
1668 * This should be the path for all ptrace stops.
1669 * We always set current->last_siginfo while stopped here.
1670 * That makes it a way to test a stopped process for
1671 * being ptrace-stopped vs being job-control-stopped.
1672 *
1673 * If we actually decide not to stop at all because the tracer is gone,
1674 * we leave nostop_code in current->exit_code.
1675 */
1676static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1677{
1678 /*
1679 * If there is a group stop in progress,
1680 * we must participate in the bookkeeping.
1681 */
1682 if (current->signal->group_stop_count > 0)
1683 --current->signal->group_stop_count;
1684
1685 current->last_siginfo = info;
1686 current->exit_code = exit_code;
1687
1688 /* Let the debugger run. */
1689 set_current_state(TASK_TRACED);
1690 spin_unlock_irq(&current->sighand->siglock);
1691 read_lock(&tasklist_lock);
1692 if (likely(current->ptrace & PT_PTRACED) &&
1693 likely(current->parent != current->real_parent ||
1694 !(current->ptrace & PT_ATTACHED)) &&
1695 (likely(current->parent->signal != current->signal) ||
1696 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001697 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 read_unlock(&tasklist_lock);
1699 schedule();
1700 } else {
1701 /*
1702 * By the time we got the lock, our tracer went away.
1703 * Don't stop here.
1704 */
1705 read_unlock(&tasklist_lock);
1706 set_current_state(TASK_RUNNING);
1707 current->exit_code = nostop_code;
1708 }
1709
1710 /*
1711 * We are back. Now reacquire the siglock before touching
1712 * last_siginfo, so that we are sure to have synchronized with
1713 * any signal-sending on another CPU that wants to examine it.
1714 */
1715 spin_lock_irq(&current->sighand->siglock);
1716 current->last_siginfo = NULL;
1717
1718 /*
1719 * Queued signals ignored us while we were stopped for tracing.
1720 * So check for any that we should take before resuming user mode.
1721 */
1722 recalc_sigpending();
1723}
1724
1725void ptrace_notify(int exit_code)
1726{
1727 siginfo_t info;
1728
1729 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1730
1731 memset(&info, 0, sizeof info);
1732 info.si_signo = SIGTRAP;
1733 info.si_code = exit_code;
1734 info.si_pid = current->pid;
1735 info.si_uid = current->uid;
1736
1737 /* Let the debugger run. */
1738 spin_lock_irq(&current->sighand->siglock);
1739 ptrace_stop(exit_code, 0, &info);
1740 spin_unlock_irq(&current->sighand->siglock);
1741}
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743static void
1744finish_stop(int stop_count)
1745{
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001746 int to_self;
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 /*
1749 * If there are no other threads in the group, or if there is
1750 * a group stop in progress and we are the last to stop,
1751 * report to the parent. When ptraced, every thread reports itself.
1752 */
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001753 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1754 to_self = 1;
1755 else if (stop_count == 0)
1756 to_self = 0;
1757 else
1758 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001760 read_lock(&tasklist_lock);
1761 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1762 read_unlock(&tasklist_lock);
1763
1764out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 schedule();
1766 /*
1767 * Now we don't run again until continued.
1768 */
1769 current->exit_code = 0;
1770}
1771
1772/*
1773 * This performs the stopping for SIGSTOP and other stop signals.
1774 * We have to stop all threads in the thread group.
1775 * Returns nonzero if we've actually stopped and released the siglock.
1776 * Returns zero if we didn't stop and still hold the siglock.
1777 */
1778static int
1779do_signal_stop(int signr)
1780{
1781 struct signal_struct *sig = current->signal;
1782 struct sighand_struct *sighand = current->sighand;
1783 int stop_count = -1;
1784
1785 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1786 return 0;
1787
1788 if (sig->group_stop_count > 0) {
1789 /*
1790 * There is a group stop in progress. We don't need to
1791 * start another one.
1792 */
1793 signr = sig->group_exit_code;
1794 stop_count = --sig->group_stop_count;
1795 current->exit_code = signr;
1796 set_current_state(TASK_STOPPED);
1797 if (stop_count == 0)
1798 sig->flags = SIGNAL_STOP_STOPPED;
1799 spin_unlock_irq(&sighand->siglock);
1800 }
1801 else if (thread_group_empty(current)) {
1802 /*
1803 * Lock must be held through transition to stopped state.
1804 */
1805 current->exit_code = current->signal->group_exit_code = signr;
1806 set_current_state(TASK_STOPPED);
1807 sig->flags = SIGNAL_STOP_STOPPED;
1808 spin_unlock_irq(&sighand->siglock);
1809 }
1810 else {
1811 /*
1812 * There is no group stop already in progress.
1813 * We must initiate one now, but that requires
1814 * dropping siglock to get both the tasklist lock
1815 * and siglock again in the proper order. Note that
1816 * this allows an intervening SIGCONT to be posted.
1817 * We need to check for that and bail out if necessary.
1818 */
1819 struct task_struct *t;
1820
1821 spin_unlock_irq(&sighand->siglock);
1822
1823 /* signals can be posted during this window */
1824
1825 read_lock(&tasklist_lock);
1826 spin_lock_irq(&sighand->siglock);
1827
1828 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1829 /*
1830 * Another stop or continue happened while we
1831 * didn't have the lock. We can just swallow this
1832 * signal now. If we raced with a SIGCONT, that
1833 * should have just cleared it now. If we raced
1834 * with another processor delivering a stop signal,
1835 * then the SIGCONT that wakes us up should clear it.
1836 */
1837 read_unlock(&tasklist_lock);
1838 return 0;
1839 }
1840
1841 if (sig->group_stop_count == 0) {
1842 sig->group_exit_code = signr;
1843 stop_count = 0;
1844 for (t = next_thread(current); t != current;
1845 t = next_thread(t))
1846 /*
1847 * Setting state to TASK_STOPPED for a group
1848 * stop is always done with the siglock held,
1849 * so this check has no races.
1850 */
Roland McGrath5acbc5c2005-09-29 14:54:42 -07001851 if (!t->exit_state &&
1852 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 stop_count++;
1854 signal_wake_up(t, 0);
1855 }
1856 sig->group_stop_count = stop_count;
1857 }
1858 else {
1859 /* A race with another thread while unlocked. */
1860 signr = sig->group_exit_code;
1861 stop_count = --sig->group_stop_count;
1862 }
1863
1864 current->exit_code = signr;
1865 set_current_state(TASK_STOPPED);
1866 if (stop_count == 0)
1867 sig->flags = SIGNAL_STOP_STOPPED;
1868
1869 spin_unlock_irq(&sighand->siglock);
1870 read_unlock(&tasklist_lock);
1871 }
1872
1873 finish_stop(stop_count);
1874 return 1;
1875}
1876
1877/*
1878 * Do appropriate magic when group_stop_count > 0.
1879 * We return nonzero if we stopped, after releasing the siglock.
1880 * We return zero if we still hold the siglock and should look
1881 * for another signal without checking group_stop_count again.
1882 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08001883static int handle_group_stop(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884{
1885 int stop_count;
1886
1887 if (current->signal->group_exit_task == current) {
1888 /*
1889 * Group stop is so we can do a core dump,
1890 * We are the initiating thread, so get on with it.
1891 */
1892 current->signal->group_exit_task = NULL;
1893 return 0;
1894 }
1895
1896 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1897 /*
1898 * Group stop is so another thread can do a core dump,
1899 * or else we are racing against a death signal.
1900 * Just punt the stop so we can get the next signal.
1901 */
1902 return 0;
1903
1904 /*
1905 * There is a group stop in progress. We stop
1906 * without any associated signal being in our queue.
1907 */
1908 stop_count = --current->signal->group_stop_count;
1909 if (stop_count == 0)
1910 current->signal->flags = SIGNAL_STOP_STOPPED;
1911 current->exit_code = current->signal->group_exit_code;
1912 set_current_state(TASK_STOPPED);
1913 spin_unlock_irq(&current->sighand->siglock);
1914 finish_stop(stop_count);
1915 return 1;
1916}
1917
1918int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1919 struct pt_regs *regs, void *cookie)
1920{
1921 sigset_t *mask = &current->blocked;
1922 int signr = 0;
1923
Rafael J. Wysockifc558a72006-03-23 03:00:05 -08001924 try_to_freeze();
1925
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926relock:
1927 spin_lock_irq(&current->sighand->siglock);
1928 for (;;) {
1929 struct k_sigaction *ka;
1930
1931 if (unlikely(current->signal->group_stop_count > 0) &&
1932 handle_group_stop())
1933 goto relock;
1934
1935 signr = dequeue_signal(current, mask, info);
1936
1937 if (!signr)
1938 break; /* will return 0 */
1939
1940 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1941 ptrace_signal_deliver(regs, cookie);
1942
1943 /* Let the debugger run. */
1944 ptrace_stop(signr, signr, info);
1945
Andrea Arcangeli30e0fca62005-10-30 15:02:38 -08001946 /* We're back. Did the debugger cancel the sig or group_exit? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 signr = current->exit_code;
Andrea Arcangeli30e0fca62005-10-30 15:02:38 -08001948 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 continue;
1950
1951 current->exit_code = 0;
1952
1953 /* Update the siginfo structure if the signal has
1954 changed. If the debugger wanted something
1955 specific in the siginfo structure then it should
1956 have updated *info via PTRACE_SETSIGINFO. */
1957 if (signr != info->si_signo) {
1958 info->si_signo = signr;
1959 info->si_errno = 0;
1960 info->si_code = SI_USER;
1961 info->si_pid = current->parent->pid;
1962 info->si_uid = current->parent->uid;
1963 }
1964
1965 /* If the (new) signal is now blocked, requeue it. */
1966 if (sigismember(&current->blocked, signr)) {
1967 specific_send_sig_info(signr, info, current);
1968 continue;
1969 }
1970 }
1971
1972 ka = &current->sighand->action[signr-1];
1973 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1974 continue;
1975 if (ka->sa.sa_handler != SIG_DFL) {
1976 /* Run the handler. */
1977 *return_ka = *ka;
1978
1979 if (ka->sa.sa_flags & SA_ONESHOT)
1980 ka->sa.sa_handler = SIG_DFL;
1981
1982 break; /* will return non-zero "signr" value */
1983 }
1984
1985 /*
1986 * Now we are doing the default action for this signal.
1987 */
1988 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1989 continue;
1990
1991 /* Init gets no signals it doesn't want. */
Eric W. Biedermanfef23e72006-03-28 16:10:58 -08001992 if (current == child_reaper)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 continue;
1994
1995 if (sig_kernel_stop(signr)) {
1996 /*
1997 * The default action is to stop all threads in
1998 * the thread group. The job control signals
1999 * do nothing in an orphaned pgrp, but SIGSTOP
2000 * always works. Note that siglock needs to be
2001 * dropped during the call to is_orphaned_pgrp()
2002 * because of lock ordering with tasklist_lock.
2003 * This allows an intervening SIGCONT to be posted.
2004 * We need to check for that and bail out if necessary.
2005 */
2006 if (signr != SIGSTOP) {
2007 spin_unlock_irq(&current->sighand->siglock);
2008
2009 /* signals can be posted during this window */
2010
2011 if (is_orphaned_pgrp(process_group(current)))
2012 goto relock;
2013
2014 spin_lock_irq(&current->sighand->siglock);
2015 }
2016
2017 if (likely(do_signal_stop(signr))) {
2018 /* It released the siglock. */
2019 goto relock;
2020 }
2021
2022 /*
2023 * We didn't actually stop, due to a race
2024 * with SIGCONT or something like that.
2025 */
2026 continue;
2027 }
2028
2029 spin_unlock_irq(&current->sighand->siglock);
2030
2031 /*
2032 * Anything else is fatal, maybe with a core dump.
2033 */
2034 current->flags |= PF_SIGNALED;
2035 if (sig_kernel_coredump(signr)) {
2036 /*
2037 * If it was able to dump core, this kills all
2038 * other threads in the group and synchronizes with
2039 * their demise. If we lost the race with another
2040 * thread getting here, it set group_exit_code
2041 * first and our do_group_exit call below will use
2042 * that value and ignore the one we pass it.
2043 */
2044 do_coredump((long)signr, signr, regs);
2045 }
2046
2047 /*
2048 * Death signals, no core dump.
2049 */
2050 do_group_exit(signr);
2051 /* NOTREACHED */
2052 }
2053 spin_unlock_irq(&current->sighand->siglock);
2054 return signr;
2055}
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057EXPORT_SYMBOL(recalc_sigpending);
2058EXPORT_SYMBOL_GPL(dequeue_signal);
2059EXPORT_SYMBOL(flush_signals);
2060EXPORT_SYMBOL(force_sig);
2061EXPORT_SYMBOL(kill_pg);
2062EXPORT_SYMBOL(kill_proc);
2063EXPORT_SYMBOL(ptrace_notify);
2064EXPORT_SYMBOL(send_sig);
2065EXPORT_SYMBOL(send_sig_info);
2066EXPORT_SYMBOL(sigprocmask);
2067EXPORT_SYMBOL(block_all_signals);
2068EXPORT_SYMBOL(unblock_all_signals);
2069
2070
2071/*
2072 * System call entry points.
2073 */
2074
2075asmlinkage long sys_restart_syscall(void)
2076{
2077 struct restart_block *restart = &current_thread_info()->restart_block;
2078 return restart->fn(restart);
2079}
2080
2081long do_no_restart_syscall(struct restart_block *param)
2082{
2083 return -EINTR;
2084}
2085
2086/*
2087 * We don't need to get the kernel lock - this is all local to this
2088 * particular thread.. (and that's good, because this is _heavily_
2089 * used by various programs)
2090 */
2091
2092/*
2093 * This is also useful for kernel threads that want to temporarily
2094 * (or permanently) block certain signals.
2095 *
2096 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2097 * interface happily blocks "unblockable" signals like SIGKILL
2098 * and friends.
2099 */
2100int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2101{
2102 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
2104 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002105 if (oldset)
2106 *oldset = current->blocked;
2107
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 error = 0;
2109 switch (how) {
2110 case SIG_BLOCK:
2111 sigorsets(&current->blocked, &current->blocked, set);
2112 break;
2113 case SIG_UNBLOCK:
2114 signandsets(&current->blocked, &current->blocked, set);
2115 break;
2116 case SIG_SETMASK:
2117 current->blocked = *set;
2118 break;
2119 default:
2120 error = -EINVAL;
2121 }
2122 recalc_sigpending();
2123 spin_unlock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002124
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 return error;
2126}
2127
2128asmlinkage long
2129sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2130{
2131 int error = -EINVAL;
2132 sigset_t old_set, new_set;
2133
2134 /* XXX: Don't preclude handling different sized sigset_t's. */
2135 if (sigsetsize != sizeof(sigset_t))
2136 goto out;
2137
2138 if (set) {
2139 error = -EFAULT;
2140 if (copy_from_user(&new_set, set, sizeof(*set)))
2141 goto out;
2142 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2143
2144 error = sigprocmask(how, &new_set, &old_set);
2145 if (error)
2146 goto out;
2147 if (oset)
2148 goto set_old;
2149 } else if (oset) {
2150 spin_lock_irq(&current->sighand->siglock);
2151 old_set = current->blocked;
2152 spin_unlock_irq(&current->sighand->siglock);
2153
2154 set_old:
2155 error = -EFAULT;
2156 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2157 goto out;
2158 }
2159 error = 0;
2160out:
2161 return error;
2162}
2163
2164long do_sigpending(void __user *set, unsigned long sigsetsize)
2165{
2166 long error = -EINVAL;
2167 sigset_t pending;
2168
2169 if (sigsetsize > sizeof(sigset_t))
2170 goto out;
2171
2172 spin_lock_irq(&current->sighand->siglock);
2173 sigorsets(&pending, &current->pending.signal,
2174 &current->signal->shared_pending.signal);
2175 spin_unlock_irq(&current->sighand->siglock);
2176
2177 /* Outside the lock because only this thread touches it. */
2178 sigandsets(&pending, &current->blocked, &pending);
2179
2180 error = -EFAULT;
2181 if (!copy_to_user(set, &pending, sigsetsize))
2182 error = 0;
2183
2184out:
2185 return error;
2186}
2187
2188asmlinkage long
2189sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2190{
2191 return do_sigpending(set, sigsetsize);
2192}
2193
2194#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2195
2196int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2197{
2198 int err;
2199
2200 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2201 return -EFAULT;
2202 if (from->si_code < 0)
2203 return __copy_to_user(to, from, sizeof(siginfo_t))
2204 ? -EFAULT : 0;
2205 /*
2206 * If you change siginfo_t structure, please be sure
2207 * this code is fixed accordingly.
2208 * It should never copy any pad contained in the structure
2209 * to avoid security leaks, but must copy the generic
2210 * 3 ints plus the relevant union member.
2211 */
2212 err = __put_user(from->si_signo, &to->si_signo);
2213 err |= __put_user(from->si_errno, &to->si_errno);
2214 err |= __put_user((short)from->si_code, &to->si_code);
2215 switch (from->si_code & __SI_MASK) {
2216 case __SI_KILL:
2217 err |= __put_user(from->si_pid, &to->si_pid);
2218 err |= __put_user(from->si_uid, &to->si_uid);
2219 break;
2220 case __SI_TIMER:
2221 err |= __put_user(from->si_tid, &to->si_tid);
2222 err |= __put_user(from->si_overrun, &to->si_overrun);
2223 err |= __put_user(from->si_ptr, &to->si_ptr);
2224 break;
2225 case __SI_POLL:
2226 err |= __put_user(from->si_band, &to->si_band);
2227 err |= __put_user(from->si_fd, &to->si_fd);
2228 break;
2229 case __SI_FAULT:
2230 err |= __put_user(from->si_addr, &to->si_addr);
2231#ifdef __ARCH_SI_TRAPNO
2232 err |= __put_user(from->si_trapno, &to->si_trapno);
2233#endif
2234 break;
2235 case __SI_CHLD:
2236 err |= __put_user(from->si_pid, &to->si_pid);
2237 err |= __put_user(from->si_uid, &to->si_uid);
2238 err |= __put_user(from->si_status, &to->si_status);
2239 err |= __put_user(from->si_utime, &to->si_utime);
2240 err |= __put_user(from->si_stime, &to->si_stime);
2241 break;
2242 case __SI_RT: /* This is not generated by the kernel as of now. */
2243 case __SI_MESGQ: /* But this is */
2244 err |= __put_user(from->si_pid, &to->si_pid);
2245 err |= __put_user(from->si_uid, &to->si_uid);
2246 err |= __put_user(from->si_ptr, &to->si_ptr);
2247 break;
2248 default: /* this is just in case for now ... */
2249 err |= __put_user(from->si_pid, &to->si_pid);
2250 err |= __put_user(from->si_uid, &to->si_uid);
2251 break;
2252 }
2253 return err;
2254}
2255
2256#endif
2257
2258asmlinkage long
2259sys_rt_sigtimedwait(const sigset_t __user *uthese,
2260 siginfo_t __user *uinfo,
2261 const struct timespec __user *uts,
2262 size_t sigsetsize)
2263{
2264 int ret, sig;
2265 sigset_t these;
2266 struct timespec ts;
2267 siginfo_t info;
2268 long timeout = 0;
2269
2270 /* XXX: Don't preclude handling different sized sigset_t's. */
2271 if (sigsetsize != sizeof(sigset_t))
2272 return -EINVAL;
2273
2274 if (copy_from_user(&these, uthese, sizeof(these)))
2275 return -EFAULT;
2276
2277 /*
2278 * Invert the set of allowed signals to get those we
2279 * want to block.
2280 */
2281 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2282 signotset(&these);
2283
2284 if (uts) {
2285 if (copy_from_user(&ts, uts, sizeof(ts)))
2286 return -EFAULT;
2287 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2288 || ts.tv_sec < 0)
2289 return -EINVAL;
2290 }
2291
2292 spin_lock_irq(&current->sighand->siglock);
2293 sig = dequeue_signal(current, &these, &info);
2294 if (!sig) {
2295 timeout = MAX_SCHEDULE_TIMEOUT;
2296 if (uts)
2297 timeout = (timespec_to_jiffies(&ts)
2298 + (ts.tv_sec || ts.tv_nsec));
2299
2300 if (timeout) {
2301 /* None ready -- temporarily unblock those we're
2302 * interested while we are sleeping in so that we'll
2303 * be awakened when they arrive. */
2304 current->real_blocked = current->blocked;
2305 sigandsets(&current->blocked, &current->blocked, &these);
2306 recalc_sigpending();
2307 spin_unlock_irq(&current->sighand->siglock);
2308
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07002309 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 spin_lock_irq(&current->sighand->siglock);
2312 sig = dequeue_signal(current, &these, &info);
2313 current->blocked = current->real_blocked;
2314 siginitset(&current->real_blocked, 0);
2315 recalc_sigpending();
2316 }
2317 }
2318 spin_unlock_irq(&current->sighand->siglock);
2319
2320 if (sig) {
2321 ret = sig;
2322 if (uinfo) {
2323 if (copy_siginfo_to_user(uinfo, &info))
2324 ret = -EFAULT;
2325 }
2326 } else {
2327 ret = -EAGAIN;
2328 if (timeout)
2329 ret = -EINTR;
2330 }
2331
2332 return ret;
2333}
2334
2335asmlinkage long
2336sys_kill(int pid, int sig)
2337{
2338 struct siginfo info;
2339
2340 info.si_signo = sig;
2341 info.si_errno = 0;
2342 info.si_code = SI_USER;
2343 info.si_pid = current->tgid;
2344 info.si_uid = current->uid;
2345
2346 return kill_something_info(sig, &info, pid);
2347}
2348
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08002349static int do_tkill(int tgid, int pid, int sig)
2350{
2351 int error;
2352 struct siginfo info;
2353 struct task_struct *p;
2354
2355 error = -ESRCH;
2356 info.si_signo = sig;
2357 info.si_errno = 0;
2358 info.si_code = SI_TKILL;
2359 info.si_pid = current->tgid;
2360 info.si_uid = current->uid;
2361
2362 read_lock(&tasklist_lock);
2363 p = find_task_by_pid(pid);
2364 if (p && (tgid <= 0 || p->tgid == tgid)) {
2365 error = check_kill_permission(sig, &info, p);
2366 /*
2367 * The null signal is a permissions and process existence
2368 * probe. No signal is actually delivered.
2369 */
2370 if (!error && sig && p->sighand) {
2371 spin_lock_irq(&p->sighand->siglock);
2372 handle_stop_signal(sig, p);
2373 error = specific_send_sig_info(sig, &info, p);
2374 spin_unlock_irq(&p->sighand->siglock);
2375 }
2376 }
2377 read_unlock(&tasklist_lock);
2378
2379 return error;
2380}
2381
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382/**
2383 * sys_tgkill - send signal to one specific thread
2384 * @tgid: the thread group ID of the thread
2385 * @pid: the PID of the thread
2386 * @sig: signal to be sent
2387 *
2388 * This syscall also checks the tgid and returns -ESRCH even if the PID
2389 * exists but it's not belonging to the target process anymore. This
2390 * method solves the problem of threads exiting and PIDs getting reused.
2391 */
2392asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2393{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 /* This is only valid for single tasks */
2395 if (pid <= 0 || tgid <= 0)
2396 return -EINVAL;
2397
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08002398 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399}
2400
2401/*
2402 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2403 */
2404asmlinkage long
2405sys_tkill(int pid, int sig)
2406{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 /* This is only valid for single tasks */
2408 if (pid <= 0)
2409 return -EINVAL;
2410
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08002411 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412}
2413
2414asmlinkage long
2415sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2416{
2417 siginfo_t info;
2418
2419 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2420 return -EFAULT;
2421
2422 /* Not even root can pretend to send signals from the kernel.
2423 Nor can they impersonate a kill(), which adds source info. */
2424 if (info.si_code >= 0)
2425 return -EPERM;
2426 info.si_signo = sig;
2427
2428 /* POSIX.1b doesn't mention process groups. */
2429 return kill_proc_info(sig, &info, pid);
2430}
2431
2432int
Oleg Nesterov9ac95f22006-02-09 22:41:50 +03002433do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434{
2435 struct k_sigaction *k;
George Anzinger71fabd5e2006-01-08 01:02:48 -08002436 sigset_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437
Jesper Juhl7ed20e12005-05-01 08:59:14 -07002438 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 return -EINVAL;
2440
2441 k = &current->sighand->action[sig-1];
2442
2443 spin_lock_irq(&current->sighand->siglock);
2444 if (signal_pending(current)) {
2445 /*
2446 * If there might be a fatal signal pending on multiple
2447 * threads, make sure we take it before changing the action.
2448 */
2449 spin_unlock_irq(&current->sighand->siglock);
2450 return -ERESTARTNOINTR;
2451 }
2452
2453 if (oact)
2454 *oact = *k;
2455
2456 if (act) {
Oleg Nesterov9ac95f22006-02-09 22:41:50 +03002457 sigdelsetmask(&act->sa.sa_mask,
2458 sigmask(SIGKILL) | sigmask(SIGSTOP));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 /*
2460 * POSIX 3.3.1.3:
2461 * "Setting a signal action to SIG_IGN for a signal that is
2462 * pending shall cause the pending signal to be discarded,
2463 * whether or not it is blocked."
2464 *
2465 * "Setting a signal action to SIG_DFL for a signal that is
2466 * pending and whose default action is to ignore the signal
2467 * (for example, SIGCHLD), shall cause the pending signal to
2468 * be discarded, whether or not it is blocked"
2469 */
2470 if (act->sa.sa_handler == SIG_IGN ||
2471 (act->sa.sa_handler == SIG_DFL &&
2472 sig_kernel_ignore(sig))) {
2473 /*
2474 * This is a fairly rare case, so we only take the
2475 * tasklist_lock once we're sure we'll need it.
2476 * Now we must do this little unlock and relock
2477 * dance to maintain the lock hierarchy.
2478 */
2479 struct task_struct *t = current;
2480 spin_unlock_irq(&t->sighand->siglock);
2481 read_lock(&tasklist_lock);
2482 spin_lock_irq(&t->sighand->siglock);
2483 *k = *act;
George Anzinger71fabd5e2006-01-08 01:02:48 -08002484 sigemptyset(&mask);
2485 sigaddset(&mask, sig);
2486 rm_from_queue_full(&mask, &t->signal->shared_pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 do {
George Anzinger71fabd5e2006-01-08 01:02:48 -08002488 rm_from_queue_full(&mask, &t->pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 recalc_sigpending_tsk(t);
2490 t = next_thread(t);
2491 } while (t != current);
2492 spin_unlock_irq(&current->sighand->siglock);
2493 read_unlock(&tasklist_lock);
2494 return 0;
2495 }
2496
2497 *k = *act;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 }
2499
2500 spin_unlock_irq(&current->sighand->siglock);
2501 return 0;
2502}
2503
2504int
2505do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2506{
2507 stack_t oss;
2508 int error;
2509
2510 if (uoss) {
2511 oss.ss_sp = (void __user *) current->sas_ss_sp;
2512 oss.ss_size = current->sas_ss_size;
2513 oss.ss_flags = sas_ss_flags(sp);
2514 }
2515
2516 if (uss) {
2517 void __user *ss_sp;
2518 size_t ss_size;
2519 int ss_flags;
2520
2521 error = -EFAULT;
2522 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2523 || __get_user(ss_sp, &uss->ss_sp)
2524 || __get_user(ss_flags, &uss->ss_flags)
2525 || __get_user(ss_size, &uss->ss_size))
2526 goto out;
2527
2528 error = -EPERM;
2529 if (on_sig_stack(sp))
2530 goto out;
2531
2532 error = -EINVAL;
2533 /*
2534 *
2535 * Note - this code used to test ss_flags incorrectly
2536 * old code may have been written using ss_flags==0
2537 * to mean ss_flags==SS_ONSTACK (as this was the only
2538 * way that worked) - this fix preserves that older
2539 * mechanism
2540 */
2541 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2542 goto out;
2543
2544 if (ss_flags == SS_DISABLE) {
2545 ss_size = 0;
2546 ss_sp = NULL;
2547 } else {
2548 error = -ENOMEM;
2549 if (ss_size < MINSIGSTKSZ)
2550 goto out;
2551 }
2552
2553 current->sas_ss_sp = (unsigned long) ss_sp;
2554 current->sas_ss_size = ss_size;
2555 }
2556
2557 if (uoss) {
2558 error = -EFAULT;
2559 if (copy_to_user(uoss, &oss, sizeof(oss)))
2560 goto out;
2561 }
2562
2563 error = 0;
2564out:
2565 return error;
2566}
2567
2568#ifdef __ARCH_WANT_SYS_SIGPENDING
2569
2570asmlinkage long
2571sys_sigpending(old_sigset_t __user *set)
2572{
2573 return do_sigpending(set, sizeof(*set));
2574}
2575
2576#endif
2577
2578#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2579/* Some platforms have their own version with special arguments others
2580 support only sys_rt_sigprocmask. */
2581
2582asmlinkage long
2583sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2584{
2585 int error;
2586 old_sigset_t old_set, new_set;
2587
2588 if (set) {
2589 error = -EFAULT;
2590 if (copy_from_user(&new_set, set, sizeof(*set)))
2591 goto out;
2592 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2593
2594 spin_lock_irq(&current->sighand->siglock);
2595 old_set = current->blocked.sig[0];
2596
2597 error = 0;
2598 switch (how) {
2599 default:
2600 error = -EINVAL;
2601 break;
2602 case SIG_BLOCK:
2603 sigaddsetmask(&current->blocked, new_set);
2604 break;
2605 case SIG_UNBLOCK:
2606 sigdelsetmask(&current->blocked, new_set);
2607 break;
2608 case SIG_SETMASK:
2609 current->blocked.sig[0] = new_set;
2610 break;
2611 }
2612
2613 recalc_sigpending();
2614 spin_unlock_irq(&current->sighand->siglock);
2615 if (error)
2616 goto out;
2617 if (oset)
2618 goto set_old;
2619 } else if (oset) {
2620 old_set = current->blocked.sig[0];
2621 set_old:
2622 error = -EFAULT;
2623 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2624 goto out;
2625 }
2626 error = 0;
2627out:
2628 return error;
2629}
2630#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2631
2632#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2633asmlinkage long
2634sys_rt_sigaction(int sig,
2635 const struct sigaction __user *act,
2636 struct sigaction __user *oact,
2637 size_t sigsetsize)
2638{
2639 struct k_sigaction new_sa, old_sa;
2640 int ret = -EINVAL;
2641
2642 /* XXX: Don't preclude handling different sized sigset_t's. */
2643 if (sigsetsize != sizeof(sigset_t))
2644 goto out;
2645
2646 if (act) {
2647 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2648 return -EFAULT;
2649 }
2650
2651 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2652
2653 if (!ret && oact) {
2654 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2655 return -EFAULT;
2656 }
2657out:
2658 return ret;
2659}
2660#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2661
2662#ifdef __ARCH_WANT_SYS_SGETMASK
2663
2664/*
2665 * For backwards compatibility. Functionality superseded by sigprocmask.
2666 */
2667asmlinkage long
2668sys_sgetmask(void)
2669{
2670 /* SMP safe */
2671 return current->blocked.sig[0];
2672}
2673
2674asmlinkage long
2675sys_ssetmask(int newmask)
2676{
2677 int old;
2678
2679 spin_lock_irq(&current->sighand->siglock);
2680 old = current->blocked.sig[0];
2681
2682 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2683 sigmask(SIGSTOP)));
2684 recalc_sigpending();
2685 spin_unlock_irq(&current->sighand->siglock);
2686
2687 return old;
2688}
2689#endif /* __ARCH_WANT_SGETMASK */
2690
2691#ifdef __ARCH_WANT_SYS_SIGNAL
2692/*
2693 * For backwards compatibility. Functionality superseded by sigaction.
2694 */
2695asmlinkage unsigned long
2696sys_signal(int sig, __sighandler_t handler)
2697{
2698 struct k_sigaction new_sa, old_sa;
2699 int ret;
2700
2701 new_sa.sa.sa_handler = handler;
2702 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03002703 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
2705 ret = do_sigaction(sig, &new_sa, &old_sa);
2706
2707 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2708}
2709#endif /* __ARCH_WANT_SYS_SIGNAL */
2710
2711#ifdef __ARCH_WANT_SYS_PAUSE
2712
2713asmlinkage long
2714sys_pause(void)
2715{
2716 current->state = TASK_INTERRUPTIBLE;
2717 schedule();
2718 return -ERESTARTNOHAND;
2719}
2720
2721#endif
2722
David Woodhouse150256d2006-01-18 17:43:57 -08002723#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2724asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2725{
2726 sigset_t newset;
2727
2728 /* XXX: Don't preclude handling different sized sigset_t's. */
2729 if (sigsetsize != sizeof(sigset_t))
2730 return -EINVAL;
2731
2732 if (copy_from_user(&newset, unewset, sizeof(newset)))
2733 return -EFAULT;
2734 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2735
2736 spin_lock_irq(&current->sighand->siglock);
2737 current->saved_sigmask = current->blocked;
2738 current->blocked = newset;
2739 recalc_sigpending();
2740 spin_unlock_irq(&current->sighand->siglock);
2741
2742 current->state = TASK_INTERRUPTIBLE;
2743 schedule();
2744 set_thread_flag(TIF_RESTORE_SIGMASK);
2745 return -ERESTARTNOHAND;
2746}
2747#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2748
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749void __init signals_init(void)
2750{
2751 sigqueue_cachep =
2752 kmem_cache_create("sigqueue",
2753 sizeof(struct sigqueue),
2754 __alignof__(struct sigqueue),
2755 SLAB_PANIC, NULL, NULL);
2756}