blob: 1e06f1eba363924a0f018ee05e3df674edd0247a [file] [log] [blame]
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010016#include <linux/sched/mm.h>
Ingo Molnar8703e8a2017-02-08 18:51:30 +010017#include <linux/sched/user.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010018#include <linux/sched/debug.h>
Ingo Molnar29930022017-02-08 18:51:36 +010019#include <linux/sched/task.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010020#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010021#include <linux/sched/cputime.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/fs.h>
23#include <linux/tty.h>
24#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070025#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/security.h>
27#include <linux/syscalls.h>
28#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070029#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070030#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090031#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070032#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080033#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080034#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080035#include <linux/pid_namespace.h>
36#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080037#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053038#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050039#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000040#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070041#include <linux/compiler.h>
Christoph Hellwig31ea70e2017-06-03 21:01:00 +020042#include <linux/posix-timers.h>
Miroslav Benes43347d52017-11-15 14:50:13 +010043#include <linux/livepatch.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070044
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050045#define CREATE_TRACE_POINTS
46#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/param.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unistd.h>
51#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010052#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040053#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55/*
56 * SLAB caches for signal bits.
57 */
58
Christoph Lametere18b8902006-12-06 20:33:20 -080059static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090061int print_fatal_signals __read_mostly;
62
Roland McGrath35de2542008-07-25 19:45:51 -070063static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070064{
Roland McGrath35de2542008-07-25 19:45:51 -070065 return t->sighand->action[sig - 1].sa.sa_handler;
66}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070067
Roland McGrath35de2542008-07-25 19:45:51 -070068static int sig_handler_ignored(void __user *handler, int sig)
69{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070070 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070071 return handler == SIG_IGN ||
72 (handler == SIG_DFL && sig_kernel_ignore(sig));
73}
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Oleg Nesterovdef8cf72012-03-23 15:02:45 -070075static int sig_task_ignored(struct task_struct *t, int sig, bool force)
Linus Torvalds1da177e2005-04-16 15:20:36 -070076{
Roland McGrath35de2542008-07-25 19:45:51 -070077 void __user *handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
Oleg Nesterovf008faf2009-04-02 16:58:02 -070079 handler = sig_handler(t, sig);
80
81 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
Oleg Nesterovac253852017-11-17 15:30:04 -080082 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
Oleg Nesterovf008faf2009-04-02 16:58:02 -070083 return 1;
84
85 return sig_handler_ignored(handler, sig);
86}
87
Oleg Nesterovdef8cf72012-03-23 15:02:45 -070088static int sig_ignored(struct task_struct *t, int sig, bool force)
Oleg Nesterovf008faf2009-04-02 16:58:02 -070089{
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 /*
91 * Blocked signals are never ignored, since the
92 * signal handler may change by the time it is
93 * unblocked.
94 */
Roland McGrath325d22d2007-11-12 15:41:55 -080095 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 return 0;
97
Oleg Nesterov628c1bc2017-11-17 15:30:01 -080098 /*
99 * Tracers may want to know about even ignored signal unless it
100 * is SIGKILL which can't be reported anyway but can be ignored
101 * by SIGNAL_UNKILLABLE task.
102 */
103 if (t->ptrace && sig != SIGKILL)
Roland McGrath35de2542008-07-25 19:45:51 -0700104 return 0;
105
Oleg Nesterov628c1bc2017-11-17 15:30:01 -0800106 return sig_task_ignored(t, sig, force);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107}
108
109/*
110 * Re-calculate pending state from the set of locally pending
111 * signals, globally pending signals, and blocked signals.
112 */
113static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
114{
115 unsigned long ready;
116 long i;
117
118 switch (_NSIG_WORDS) {
119 default:
120 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
121 ready |= signal->sig[i] &~ blocked->sig[i];
122 break;
123
124 case 4: ready = signal->sig[3] &~ blocked->sig[3];
125 ready |= signal->sig[2] &~ blocked->sig[2];
126 ready |= signal->sig[1] &~ blocked->sig[1];
127 ready |= signal->sig[0] &~ blocked->sig[0];
128 break;
129
130 case 2: ready = signal->sig[1] &~ blocked->sig[1];
131 ready |= signal->sig[0] &~ blocked->sig[0];
132 break;
133
134 case 1: ready = signal->sig[0] &~ blocked->sig[0];
135 }
136 return ready != 0;
137}
138
139#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
140
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700141static int recalc_sigpending_tsk(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142{
Tejun Heo3759a0d2011-06-02 11:14:00 +0200143 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 PENDING(&t->pending, &t->blocked) ||
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700145 PENDING(&t->signal->shared_pending, &t->blocked)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 set_tsk_thread_flag(t, TIF_SIGPENDING);
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700147 return 1;
148 }
Roland McGrathb74d0de2007-06-06 03:59:00 -0700149 /*
150 * We must never clear the flag in another thread, or in current
151 * when it's possible the current syscall is returning -ERESTART*.
152 * So we don't clear it here, and only callers who know they should do.
153 */
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700154 return 0;
155}
156
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
166
167void recalc_sigpending(void)
168{
Miroslav Benes43347d52017-11-15 14:50:13 +0100169 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
170 !klp_patch_pending(current))
Roland McGrathb74d0de2007-06-06 03:59:00 -0700171 clear_thread_flag(TIF_SIGPENDING);
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173}
174
Eric W. Biederman088fe472018-07-23 17:26:49 -0500175void calculate_sigpending(void)
176{
177 /* Have any signals or users of TIF_SIGPENDING been delayed
178 * until after fork?
179 */
180 spin_lock_irq(&current->sighand->siglock);
181 set_tsk_thread_flag(current, TIF_SIGPENDING);
182 recalc_sigpending();
183 spin_unlock_irq(&current->sighand->siglock);
184}
185
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186/* Given the mask, find the first available signal that should be serviced. */
187
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800188#define SYNCHRONOUS_MASK \
189 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
Will Drewrya0727e82012-04-12 16:48:00 -0500190 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800191
Davide Libenzifba2afa2007-05-10 22:23:13 -0700192int next_signal(struct sigpending *pending, sigset_t *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193{
194 unsigned long i, *s, *m, x;
195 int sig = 0;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900196
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 s = pending->signal.sig;
198 m = mask->sig;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800199
200 /*
201 * Handle the first word specially: it contains the
202 * synchronous signals that need to be dequeued first.
203 */
204 x = *s &~ *m;
205 if (x) {
206 if (x & SYNCHRONOUS_MASK)
207 x &= SYNCHRONOUS_MASK;
208 sig = ffz(~x) + 1;
209 return sig;
210 }
211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 switch (_NSIG_WORDS) {
213 default:
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800214 for (i = 1; i < _NSIG_WORDS; ++i) {
215 x = *++s &~ *++m;
216 if (!x)
217 continue;
218 sig = ffz(~x) + i*_NSIG_BPW + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 break;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800220 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 break;
222
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800223 case 2:
224 x = s[1] &~ m[1];
225 if (!x)
226 break;
227 sig = ffz(~x) + _NSIG_BPW + 1;
228 break;
229
230 case 1:
231 /* Nothing to do */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232 break;
233 }
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900234
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 return sig;
236}
237
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900238static inline void print_dropped_signal(int sig)
239{
240 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
241
242 if (!print_fatal_signals)
243 return;
244
245 if (!__ratelimit(&ratelimit_state))
246 return;
247
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700248 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900249 current->comm, current->pid, sig);
250}
251
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100252/**
Tejun Heo7dd3db52011-06-02 11:14:00 +0200253 * task_set_jobctl_pending - set jobctl pending bits
254 * @task: target task
255 * @mask: pending bits to set
256 *
257 * Clear @mask from @task->jobctl. @mask must be subset of
258 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
259 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
260 * cleared. If @task is already being killed or exiting, this function
261 * becomes noop.
262 *
263 * CONTEXT:
264 * Must be called with @task->sighand->siglock held.
265 *
266 * RETURNS:
267 * %true if @mask is set, %false if made noop because @task was dying.
268 */
Palmer Dabbeltb76808e2015-04-30 21:19:57 -0700269bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
Tejun Heo7dd3db52011-06-02 11:14:00 +0200270{
271 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
272 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
273 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
274
275 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
276 return false;
277
278 if (mask & JOBCTL_STOP_SIGMASK)
279 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
280
281 task->jobctl |= mask;
282 return true;
283}
284
285/**
Tejun Heoa8f072c2011-06-02 11:13:59 +0200286 * task_clear_jobctl_trapping - clear jobctl trapping bit
Tejun Heod79fdd62011-03-23 10:37:00 +0100287 * @task: target task
288 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200289 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
290 * Clear it and wake up the ptracer. Note that we don't need any further
291 * locking. @task->siglock guarantees that @task->parent points to the
292 * ptracer.
Tejun Heod79fdd62011-03-23 10:37:00 +0100293 *
294 * CONTEXT:
295 * Must be called with @task->sighand->siglock held.
296 */
Tejun Heo73ddff22011-06-14 11:20:14 +0200297void task_clear_jobctl_trapping(struct task_struct *task)
Tejun Heod79fdd62011-03-23 10:37:00 +0100298{
Tejun Heoa8f072c2011-06-02 11:13:59 +0200299 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
300 task->jobctl &= ~JOBCTL_TRAPPING;
Oleg Nesterov650226b2014-06-06 14:36:44 -0700301 smp_mb(); /* advised by wake_up_bit() */
Tejun Heo62c124f2011-06-02 11:14:00 +0200302 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
Tejun Heod79fdd62011-03-23 10:37:00 +0100303 }
304}
305
306/**
Tejun Heo3759a0d2011-06-02 11:14:00 +0200307 * task_clear_jobctl_pending - clear jobctl pending bits
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100308 * @task: target task
Tejun Heo3759a0d2011-06-02 11:14:00 +0200309 * @mask: pending bits to clear
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100310 *
Tejun Heo3759a0d2011-06-02 11:14:00 +0200311 * Clear @mask from @task->jobctl. @mask must be subset of
312 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
313 * STOP bits are cleared together.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100314 *
Tejun Heo6dfca322011-06-02 11:14:00 +0200315 * If clearing of @mask leaves no stop or trap pending, this function calls
316 * task_clear_jobctl_trapping().
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100317 *
318 * CONTEXT:
319 * Must be called with @task->sighand->siglock held.
320 */
Palmer Dabbeltb76808e2015-04-30 21:19:57 -0700321void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100322{
Tejun Heo3759a0d2011-06-02 11:14:00 +0200323 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
324
325 if (mask & JOBCTL_STOP_PENDING)
326 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
327
328 task->jobctl &= ~mask;
Tejun Heo6dfca322011-06-02 11:14:00 +0200329
330 if (!(task->jobctl & JOBCTL_PENDING_MASK))
331 task_clear_jobctl_trapping(task);
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100332}
333
334/**
335 * task_participate_group_stop - participate in a group stop
336 * @task: task participating in a group stop
337 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200338 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
Tejun Heo39efa3e2011-03-23 10:37:00 +0100339 * Group stop states are cleared and the group stop count is consumed if
Tejun Heoa8f072c2011-06-02 11:13:59 +0200340 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
Tejun Heo39efa3e2011-03-23 10:37:00 +0100341 * stop, the appropriate %SIGNAL_* flags are set.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100342 *
343 * CONTEXT:
344 * Must be called with @task->sighand->siglock held.
Tejun Heo244056f2011-03-23 10:37:01 +0100345 *
346 * RETURNS:
347 * %true if group stop completion should be notified to the parent, %false
348 * otherwise.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100349 */
350static bool task_participate_group_stop(struct task_struct *task)
351{
352 struct signal_struct *sig = task->signal;
Tejun Heoa8f072c2011-06-02 11:13:59 +0200353 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100354
Tejun Heoa8f072c2011-06-02 11:13:59 +0200355 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
Tejun Heo39efa3e2011-03-23 10:37:00 +0100356
Tejun Heo3759a0d2011-06-02 11:14:00 +0200357 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100358
359 if (!consume)
360 return false;
361
362 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
363 sig->group_stop_count--;
364
Tejun Heo244056f2011-03-23 10:37:01 +0100365 /*
366 * Tell the caller to notify completion iff we are entering into a
367 * fresh group stop. Read comment in do_signal_stop() for details.
368 */
369 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
Jamie Iles2d39b3c2017-01-10 16:57:54 -0800370 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100371 return true;
372 }
373 return false;
374}
375
David Howellsc69e8d92008-11-14 10:39:19 +1100376/*
377 * allocate a new signal queue record
378 * - this may be called without locks if and only if t == current, otherwise an
Randy Dunlap5aba0852011-04-04 14:59:31 -0700379 * appropriate lock must be held to stop the target task from exiting
David Howellsc69e8d92008-11-14 10:39:19 +1100380 */
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900381static struct sigqueue *
382__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383{
384 struct sigqueue *q = NULL;
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800385 struct user_struct *user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800387 /*
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000388 * Protect access to @t credentials. This can go away when all
389 * callers hold rcu read lock.
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800390 */
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000391 rcu_read_lock();
David Howellsd84f4f92008-11-14 10:39:23 +1100392 user = get_uid(__task_cred(t)->user);
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800393 atomic_inc(&user->sigpending);
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000394 rcu_read_unlock();
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900395
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 if (override_rlimit ||
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800397 atomic_read(&user->sigpending) <=
Jiri Slaby78d7d402010-03-05 13:42:54 -0800398 task_rlimit(t, RLIMIT_SIGPENDING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 q = kmem_cache_alloc(sigqueue_cachep, flags);
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900400 } else {
401 print_dropped_signal(sig);
402 }
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 if (unlikely(q == NULL)) {
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800405 atomic_dec(&user->sigpending);
David Howellsd84f4f92008-11-14 10:39:23 +1100406 free_uid(user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 } else {
408 INIT_LIST_HEAD(&q->list);
409 q->flags = 0;
David Howellsd84f4f92008-11-14 10:39:23 +1100410 q->user = user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 }
David Howellsd84f4f92008-11-14 10:39:23 +1100412
413 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414}
415
Andrew Morton514a01b2006-02-03 03:04:41 -0800416static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417{
418 if (q->flags & SIGQUEUE_PREALLOC)
419 return;
420 atomic_dec(&q->user->sigpending);
421 free_uid(q->user);
422 kmem_cache_free(sigqueue_cachep, q);
423}
424
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800425void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426{
427 struct sigqueue *q;
428
429 sigemptyset(&queue->signal);
430 while (!list_empty(&queue->list)) {
431 q = list_entry(queue->list.next, struct sigqueue , list);
432 list_del_init(&q->list);
433 __sigqueue_free(q);
434 }
435}
436
437/*
Oleg Nesterov9e7c8f82015-06-04 16:22:16 -0400438 * Flush all pending signals for this kthread.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439 */
Oleg Nesterovc81addc2006-03-28 16:11:17 -0800440void flush_signals(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441{
442 unsigned long flags;
443
444 spin_lock_irqsave(&t->sighand->siglock, flags);
Oleg Nesterov9e7c8f82015-06-04 16:22:16 -0400445 clear_tsk_thread_flag(t, TIF_SIGPENDING);
446 flush_sigqueue(&t->pending);
447 flush_sigqueue(&t->signal->shared_pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448 spin_unlock_irqrestore(&t->sighand->siglock, flags);
449}
450
#ifdef CONFIG_POSIX_TIMERS
/*
 * Remove queued SI_TIMER entries from @pending while retaining every
 * other pending signal bit.  Helper for flush_itimer_signals().
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* non-timer entry: keep its signal bit */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

/* Drop all pending itimer (SI_TIMER) signals for the current task. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400486
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700487void ignore_signals(struct task_struct *t)
488{
489 int i;
490
491 for (i = 0; i < _NSIG; ++i)
492 t->sighand->action[i].sa.sa_handler = SIG_IGN;
493
494 flush_signals(t);
495}
496
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498 * Flush all handlers for a task.
499 */
500
501void
502flush_signal_handlers(struct task_struct *t, int force_default)
503{
504 int i;
505 struct k_sigaction *ka = &t->sighand->action[0];
506 for (i = _NSIG ; i != 0 ; i--) {
507 if (force_default || ka->sa.sa_handler != SIG_IGN)
508 ka->sa.sa_handler = SIG_DFL;
509 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700510#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700511 ka->sa.sa_restorer = NULL;
512#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 sigemptyset(&ka->sa.sa_mask);
514 ka++;
515 }
516}
517
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200518int unhandled_signal(struct task_struct *tsk, int sig)
519{
Roland McGrath445a91d2008-07-25 19:45:52 -0700520 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
Serge E. Hallynb460cbc2007-10-18 23:39:52 -0700521 if (is_global_init(tsk))
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200522 return 1;
Roland McGrath445a91d2008-07-25 19:45:52 -0700523 if (handler != SIG_IGN && handler != SIG_DFL)
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200524 return 0;
Tejun Heoa288eec2011-06-17 16:50:37 +0200525 /* if ptraced, let the tracer determine */
526 return !tsk->ptrace;
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200527}
528
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500529static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
530 bool *resched_timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531{
532 struct sigqueue *q, *first = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 /*
535 * Collect the siginfo appropriate to this signal. Check if
536 * there is another siginfo for the same signal.
537 */
538 list_for_each_entry(q, &list->list, list) {
539 if (q->info.si_signo == sig) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700540 if (first)
541 goto still_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 first = q;
543 }
544 }
Oleg Nesterovd4434202008-07-25 01:47:28 -0700545
546 sigdelset(&list->signal, sig);
547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 if (first) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700549still_pending:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 list_del_init(&first->list);
551 copy_siginfo(info, &first->info);
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500552
553 *resched_timer =
554 (first->flags & SIGQUEUE_PREALLOC) &&
555 (info->si_code == SI_TIMER) &&
556 (info->si_sys_private);
557
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 __sigqueue_free(first);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 } else {
Randy Dunlap5aba0852011-04-04 14:59:31 -0700560 /*
561 * Ok, it wasn't in the queue. This must be
562 * a fast-pathed signal or we must have been
563 * out of queue space. So zero out the info.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 */
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -0600565 clear_siginfo(info);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566 info->si_signo = sig;
567 info->si_errno = 0;
Oleg Nesterov7486e5d2009-12-15 16:47:24 -0800568 info->si_code = SI_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 info->si_pid = 0;
570 info->si_uid = 0;
571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572}
573
574static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500575 siginfo_t *info, bool *resched_timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576{
Roland McGrath27d91e02006-09-29 02:00:31 -0700577 int sig = next_signal(pending, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578
Oleg Nesterov2e01fab2015-11-06 16:32:19 -0800579 if (sig)
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500580 collect_signal(sig, pending, info, resched_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 return sig;
582}
583
584/*
Randy Dunlap5aba0852011-04-04 14:59:31 -0700585 * Dequeue a signal and return the element to the caller, which is
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586 * expected to free it.
587 *
588 * All callers have to hold the siglock.
589 */
590int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
591{
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500592 bool resched_timer = false;
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700593 int signr;
Benjamin Herrenschmidtcaec4e82007-06-12 08:16:18 +1000594
595 /* We only dequeue private signals from ourselves, we don't let
596 * signalfd steal them
597 */
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500598 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800599 if (!signr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600 signr = __dequeue_signal(&tsk->signal->shared_pending,
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500601 mask, info, &resched_timer);
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500602#ifdef CONFIG_POSIX_TIMERS
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800603 /*
604 * itimer signal ?
605 *
606 * itimers are process shared and we restart periodic
607 * itimers in the signal delivery path to prevent DoS
608 * attacks in the high resolution timer case. This is
Randy Dunlap5aba0852011-04-04 14:59:31 -0700609 * compliant with the old way of self-restarting
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800610 * itimers, as the SIGALRM is a legacy signal and only
611 * queued once. Changing the restart behaviour to
612 * restart the timer in the signal dequeue path is
613 * reducing the timer noise on heavy loaded !highres
614 * systems too.
615 */
616 if (unlikely(signr == SIGALRM)) {
617 struct hrtimer *tmr = &tsk->signal->real_timer;
618
619 if (!hrtimer_is_queued(tmr) &&
Thomas Gleixner2456e852016-12-25 11:38:40 +0100620 tsk->signal->it_real_incr != 0) {
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800621 hrtimer_forward(tmr, tmr->base->get_time(),
622 tsk->signal->it_real_incr);
623 hrtimer_restart(tmr);
624 }
625 }
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500626#endif
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800627 }
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700628
Davide Libenzib8fceee2007-09-20 12:40:16 -0700629 recalc_sigpending();
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700630 if (!signr)
631 return 0;
632
633 if (unlikely(sig_kernel_stop(signr))) {
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800634 /*
635 * Set a marker that we have dequeued a stop signal. Our
636 * caller might release the siglock and then the pending
637 * stop signal it is about to process is no longer in the
638 * pending bitmasks, but must still be cleared by a SIGCONT
639 * (and overruled by a SIGKILL). So those cases clear this
640 * shared flag after we've set it. Note that this flag may
641 * remain set after the signal we return is ignored or
642 * handled. That doesn't matter because its only purpose
643 * is to alert stop-signal processing code when another
644 * processor has come along and cleared the flag.
645 */
Tejun Heoa8f072c2011-06-02 11:13:59 +0200646 current->jobctl |= JOBCTL_STOP_DEQUEUED;
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800647 }
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500648#ifdef CONFIG_POSIX_TIMERS
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500649 if (resched_timer) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650 /*
651 * Release the siglock to ensure proper locking order
652 * of timer locks outside of siglocks. Note, we leave
653 * irqs disabled here, since the posix-timers code is
654 * about to disable them again anyway.
655 */
656 spin_unlock(&tsk->sighand->siglock);
Thomas Gleixner96fe3b02017-05-30 23:15:46 +0200657 posixtimer_rearm(info);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 spin_lock(&tsk->sighand->siglock);
Eric W. Biederman9943d3a2017-07-24 14:53:03 -0500659
660 /* Don't expose the si_sys_private value to userspace */
661 info->si_sys_private = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 }
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500663#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 return signr;
665}
666
667/*
668 * Tell a process that it has a new active signal..
669 *
670 * NOTE! we rely on the previous spin_lock to
671 * lock interrupts for us! We can only be called with
672 * "siglock" held, and the local interrupt must
673 * have been disabled when that got acquired!
674 *
675 * No need to set need_resched since signal event passing
676 * goes through ->blocked
677 */
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100678void signal_wake_up_state(struct task_struct *t, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 set_tsk_thread_flag(t, TIF_SIGPENDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681 /*
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100682 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
Matthew Wilcoxf021a3c2007-12-06 11:13:16 -0500683 * case. We don't check t->state here because there is a race with it
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 * executing another processor and just now entering stopped state.
685 * By using wake_up_state, we ensure the process will wake up and
686 * handle its death signal.
687 */
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100688 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 kick_process(t);
690}
691
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* Fast path: nothing in the pending set intersects the mask. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	/* Clear the matching bits, then free any queued siginfo entries. */
	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
Oleg Nesterov614c5172009-12-15 16:47:22 -0800717static inline int is_si_special(const struct siginfo *info)
718{
719 return info <= SEND_SIG_FORCED;
720}
721
722static inline bool si_fromuser(const struct siginfo *info)
723{
724 return info == SEND_SIG_NOINFO ||
725 (!is_si_special(info) && SI_FROMUSER(info));
726}
727
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700729 * called with RCU read lock from check_kill_permission()
730 */
731static int kill_ok_by_cred(struct task_struct *t)
732{
733 const struct cred *cred = current_cred();
734 const struct cred *tcred = __task_cred(t);
735
Eric W. Biederman5af66202012-03-03 20:21:47 -0800736 if (uid_eq(cred->euid, tcred->suid) ||
737 uid_eq(cred->euid, tcred->uid) ||
738 uid_eq(cred->uid, tcred->suid) ||
739 uid_eq(cred->uid, tcred->uid))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700740 return 1;
741
Eric W. Biedermanc4a4d602011-11-16 23:15:31 -0800742 if (ns_capable(tcred->user_ns, CAP_KILL))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700743 return 1;
744
745 return 0;
746}
747
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 if @current may send @sig to @t, or a -errno:
 * -EINVAL for an invalid signal number, -EPERM on credential mismatch,
 * or whatever the audit/security hooks report.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-internal signals bypass the permission checks entirely. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 *
			 * Note: falls through to the -EPERM default when
			 * the sessions differ - that is intentional.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	/* Final say goes to the LSM (e.g. SELinux). */
	return security_task_kill(t, info, sig, NULL);
}
786
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* Wake only if the ptracer is LISTENing; otherwise just set pending. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
812
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 *
 * Called with the target's siglock held (implied by the queue/flag
 * manipulation below).
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* During a coredump only SIGKILL may still be delivered. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* SEIZEd tracees re-trap instead of resuming directly. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* After the side effects, decide whether the signal is dropped. */
	return !sig_ignored(p, sig, force);
}
887
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700888/*
889 * Test if P wants to take SIG. After we've checked all threads with this,
890 * it's equivalent to finding no threads not blocking SIG. Any threads not
891 * blocking SIG were ruled out because they are not running and already
892 * have pending signals. Such threads will dequeue from the shared queue
893 * as soon as they're available, so putting the signal on the shared queue
894 * will be equivalent to sending it to one such thread.
895 */
896static inline int wants_signal(int sig, struct task_struct *p)
897{
898 if (sigismember(&p->blocked, sig))
899 return 0;
900 if (p->flags & PF_EXITING)
901 return 0;
902 if (sig == SIGKILL)
903 return 1;
904 if (task_is_stopped_or_traced(p))
905 return 0;
906 return task_curr(p) || !signal_pending(p);
907}
908
/*
 * Pick a thread to take @sig off the (already populated) pending queue
 * and wake it.  For fatal signals this may instead escalate to killing
 * the whole thread group immediately.
 *
 * Called with the siglock held, after __send_signal() has queued @sig.
 */
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so delivery is spread across threads.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
984
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -0700985static inline int legacy_queue(struct sigpending *signals, int sig)
986{
987 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
988}
989
#ifdef CONFIG_USER_NS
/*
 * Translate info->si_uid from the sender's user namespace into the
 * receiver's, so the target task sees a uid meaningful in its own ns.
 * Only applies to user-originated siginfo crossing a namespace boundary.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	/* Same user namespace: nothing to translate. */
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	/* Kernel-generated siginfo carries no meaningful si_uid. */
	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* Without user namespaces there is never anything to fix up. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1010
/*
 * Queue @sig (with @info) on @t's private or shared pending set and pick
 * a thread to take it via complete_signal().
 *
 * @type selects the destination queue: PIDTYPE_PID targets the single
 * task's private queue, anything else the thread group's shared queue.
 * @from_ancestor_ns is set when the sender lives in an ancestor pid
 * namespace, in which case si_pid is cleared (it would be meaningless
 * in the receiver's namespace).
 *
 * Returns 0 on success or -EAGAIN when an rt signal that must queue
 * cannot be allocated.  Caller must hold t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Synthesize or copy the siginfo depending on the marker. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
1115
/*
 * Wrapper around __send_signal() that computes from_ancestor_ns:
 * a user-originated signal whose sender has no pid in the target's
 * active pid namespace must have its si_pid suppressed.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	/* task_pid_nr_ns() == 0 means current is invisible in t's pid ns. */
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
1128
/*
 * Log a fatal signal, the current registers, and (on x86-32 only) a hex
 * dump of up to 16 instruction bytes at the faulting ip.  Controlled by
 * the print-fatal-signals= boot parameter.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop dumping at the first unreadable byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* show_regs() wants a stable CPU context. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1152
1153static int __init setup_print_fatal_signals(char *str)
1154{
1155 get_option (&str, &print_fatal_signals);
1156
1157 return 1;
1158}
1159
1160__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
/*
 * Send a signal to a whole thread group (shared pending queue).
 * Caller must hold p->sighand->siglock (see send_signal/__send_signal).
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
1167
/*
 * Send a signal to one specific task (private pending queue).
 * Caller must hold t->sighand->siglock.
 */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, PIDTYPE_PID);
}
1173
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001174int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001175 enum pid_type type)
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001176{
1177 unsigned long flags;
1178 int ret = -ESRCH;
1179
1180 if (lock_task_sighand(p, &flags)) {
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001181 ret = send_signal(sig, info, p, type);
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001182 unlock_task_sighand(p, &flags);
1183 }
1184
1185 return ret;
1186}
1187
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Reset to default so the forced signal cannot be dodged. */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1228
/*
 * Nuke all other threads in the group.
 *
 * Queues SIGKILL on every sibling of @p and wakes it; returns the number
 * of other threads seen (including already-dead ones, which are counted
 * but not signalled).  Caller is expected to hold the siglock.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	/* A pending group stop is moot once everyone is being killed. */
	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1252
/*
 * Safely acquire @tsk->sighand->siglock even though the sighand may be
 * freed concurrently.  Returns the locked sighand (with *flags saved for
 * the matching unlock), or NULL if the task's sighand is gone.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		/* Lost the race: drop the stale lock and retry. */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1284
David Howellsc69e8d92008-11-14 10:39:19 +11001285/*
1286 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001287 */
Eric W. Biederman01024982018-07-13 18:40:57 -05001288int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1289 enum pid_type type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290{
David Howells694f6902010-08-04 16:59:14 +01001291 int ret;
1292
1293 rcu_read_lock();
1294 ret = check_kill_permission(sig, info, p);
1295 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001297 if (!ret && sig)
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001298 ret = do_send_sig_info(sig, info, p, type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
1300 return ret;
1301}
1302
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 *
 * Returns 0 if at least one member was signalled successfully, otherwise
 * the last error (-ESRCH when the group is empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/* One success is enough; keep the last error otherwise. */
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1322
/*
 * Send @sig to the thread group identified by @pid.  Retries when the
 * group leader is unhashed under us (exec/de_thread race).
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1344
/*
 * Like kill_pid_info(), but takes a numeric pid resolved in the caller's
 * pid namespace (hence find_vpid under RCU).
 */
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1353
Serge Hallynd178bc32011-09-26 10:45:18 -05001354static int kill_as_cred_perm(const struct cred *cred,
1355 struct task_struct *target)
1356{
1357 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001358 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1359 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001360 return 0;
1361 return 1;
1362}
1363
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
/*
 * Permission is judged against the supplied @cred instead of current's
 * credentials.  Returns 0 on success, -EINVAL for a bad signal number,
 * -ESRCH if the pid is gone, -EPERM on a credential mismatch, or the
 * LSM's verdict.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	/* sig == 0 is a pure permission probe - nothing to deliver. */
	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	/* pid > 0: signal exactly one thread group. */
	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0: caller's own pgrp; pid < -1: pgrp number -pid. */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		/* pid == -1: broadcast to everything except init and ourselves. */
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				/* -EPERM on some targets does not fail the whole call. */
				if (err != -EPERM)
					retval = err;
			}
		}
		/* -ESRCH only when no process was eligible at all. */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1448
1449/*
1450 * These are for backward compatibility with the rest of the kernel source.
1451 */
1452
Randy Dunlap5aba0852011-04-04 14:59:31 -07001453int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 /*
1456 * Make sure legacy kernel users don't send in bad values
1457 * (normal paths check this in check_kill_permission).
1458 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001459 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 return -EINVAL;
1461
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001462 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463}
1464
/* Map a "priv" flag to the special siginfo cookie used for kernel senders. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1467
/* Send @sig to @p; @priv selects kernel-internal vs. no-info siginfo. */
int send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *info = __si_special(priv);

	return send_sig_info(sig, info, p);
}
1473
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474void
1475force_sig(int sig, struct task_struct *p)
1476{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001477 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
1479
1480/*
1481 * When things go south during signal handling, we
1482 * will force a SIGSEGV. And if the signal that caused
1483 * the problem was already a SIGSEGV, we'll want to
1484 * make sure we don't even try to deliver the signal..
1485 */
1486int
1487force_sigsegv(int sig, struct task_struct *p)
1488{
1489 if (sig == SIGSEGV) {
1490 unsigned long flags;
1491 spin_lock_irqsave(&p->sighand->siglock, flags);
1492 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1493 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1494 }
1495 force_sig(SIGSEGV, p);
1496 return 0;
1497}
1498
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001499int force_sig_fault(int sig, int code, void __user *addr
1500 ___ARCH_SI_TRAPNO(int trapno)
1501 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1502 , struct task_struct *t)
1503{
1504 struct siginfo info;
1505
1506 clear_siginfo(&info);
1507 info.si_signo = sig;
1508 info.si_errno = 0;
1509 info.si_code = code;
1510 info.si_addr = addr;
1511#ifdef __ARCH_SI_TRAPNO
1512 info.si_trapno = trapno;
1513#endif
1514#ifdef __ia64__
1515 info.si_imm = imm;
1516 info.si_flags = flags;
1517 info.si_isr = isr;
1518#endif
1519 return force_sig_info(info.si_signo, &info, t);
1520}
1521
1522int send_sig_fault(int sig, int code, void __user *addr
1523 ___ARCH_SI_TRAPNO(int trapno)
1524 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1525 , struct task_struct *t)
1526{
1527 struct siginfo info;
1528
1529 clear_siginfo(&info);
1530 info.si_signo = sig;
1531 info.si_errno = 0;
1532 info.si_code = code;
1533 info.si_addr = addr;
1534#ifdef __ARCH_SI_TRAPNO
1535 info.si_trapno = trapno;
1536#endif
1537#ifdef __ia64__
1538 info.si_imm = imm;
1539 info.si_flags = flags;
1540 info.si_isr = isr;
1541#endif
1542 return send_sig_info(info.si_signo, &info, t);
1543}
1544
Eric W. Biederman38246732018-01-18 18:54:31 -06001545int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1546{
1547 struct siginfo info;
1548
1549 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1550 clear_siginfo(&info);
1551 info.si_signo = SIGBUS;
1552 info.si_errno = 0;
1553 info.si_code = code;
1554 info.si_addr = addr;
1555 info.si_addr_lsb = lsb;
1556 return force_sig_info(info.si_signo, &info, t);
1557}
1558
1559int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1560{
1561 struct siginfo info;
1562
1563 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1564 clear_siginfo(&info);
1565 info.si_signo = SIGBUS;
1566 info.si_errno = 0;
1567 info.si_code = code;
1568 info.si_addr = addr;
1569 info.si_addr_lsb = lsb;
1570 return send_sig_info(info.si_signo, &info, t);
1571}
1572EXPORT_SYMBOL(send_sig_mceerr);
Eric W. Biederman38246732018-01-18 18:54:31 -06001573
Eric W. Biederman38246732018-01-18 18:54:31 -06001574int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1575{
1576 struct siginfo info;
1577
1578 clear_siginfo(&info);
1579 info.si_signo = SIGSEGV;
1580 info.si_errno = 0;
1581 info.si_code = SEGV_BNDERR;
1582 info.si_addr = addr;
1583 info.si_lower = lower;
1584 info.si_upper = upper;
1585 return force_sig_info(info.si_signo, &info, current);
1586}
Eric W. Biederman38246732018-01-18 18:54:31 -06001587
#ifdef SEGV_PKUERR
/*
 * Force a SIGSEGV/SEGV_PKUERR (protection-key violation) on the current
 * task, reporting the offending protection key in si_pkey.
 */
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(SIGSEGV, &info, current);
}
#endif
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001602
Eric W. Biedermanf71dd7d2018-01-22 14:37:25 -06001603/* For the crazy architectures that include trap information in
1604 * the errno field, instead of an actual errno value.
1605 */
1606int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1607{
1608 struct siginfo info;
1609
1610 clear_siginfo(&info);
1611 info.si_signo = SIGTRAP;
1612 info.si_errno = errno;
1613 info.si_code = TRAP_HWBKPT;
1614 info.si_addr = addr;
1615 return force_sig_info(info.si_signo, &info, current);
1616}
1617
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001618int kill_pgrp(struct pid *pid, int sig, int priv)
1619{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001620 int ret;
1621
1622 read_lock(&tasklist_lock);
1623 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1624 read_unlock(&tasklist_lock);
1625
1626 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001627}
1628EXPORT_SYMBOL(kill_pgrp);
1629
/* Send @sig to the thread group identified by struct pid @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	struct siginfo *info = __si_special(priv);

	return kill_pid_info(sig, info, pid);
}
EXPORT_SYMBOL(kill_pid);
1635
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636/*
1637 * These functions support sending signals using preallocated sigqueue
1638 * structures. This is needed "because realtime applications cannot
1639 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001640 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 * we allocate the sigqueue structure from the timer_create. If this
1642 * allocation fails we are able to report the failure to the application
1643 * with an EAGAIN error.
1644 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645struct sigqueue *sigqueue_alloc(void)
1646{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001647 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001649 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001651
1652 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653}
1654
1655void sigqueue_free(struct sigqueue *q)
1656{
1657 unsigned long flags;
Oleg Nesterov60187d22007-08-30 23:56:35 -07001658 spinlock_t *lock = &current->sighand->siglock;
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1661 /*
Oleg Nesterovc8e85b4f2008-05-26 20:55:42 +04001662 * We must hold ->siglock while testing q->list
1663 * to serialize with collect_signal() or with
Oleg Nesterovda7978b2008-05-23 13:04:41 -07001664 * __exit_signal()->flush_sigqueue().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 */
Oleg Nesterov60187d22007-08-30 23:56:35 -07001666 spin_lock_irqsave(lock, flags);
Oleg Nesterovc8e85b4f2008-05-26 20:55:42 +04001667 q->flags &= ~SIGQUEUE_PREALLOC;
1668 /*
1669 * If it is queued it will be freed when dequeued,
1670 * like the "regular" sigqueue.
1671 */
Oleg Nesterov60187d22007-08-30 23:56:35 -07001672 if (!list_empty(&q->list))
Oleg Nesterovc8e85b4f2008-05-26 20:55:42 +04001673 q = NULL;
Oleg Nesterov60187d22007-08-30 23:56:35 -07001674 spin_unlock_irqrestore(lock, flags);
1675
Oleg Nesterovc8e85b4f2008-05-26 20:55:42 +04001676 if (q)
1677 __sigqueue_free(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678}
1679
/*
 * Queue a preallocated sigqueue (POSIX timer expiry) to the task found
 * via @pid/@type.  Returns 0 on delivery, 1 if the signal is ignored,
 * -1 if the target task is gone.
 */
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	/* Only preallocated (timer) sigqueues may be sent this way. */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	/* Task gone, or its sighand is being torn down: report -1. */
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	/* Thread-directed (PIDTYPE_PID) signals go to t->pending; else shared. */
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1727
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 *
 * NOTE(review): caller is expected to hold tasklist_lock (see the comment
 * above the rcu_read_lock() below) — confirm at call sites.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	/* Only a (non-traced) group leader with no live threads may notify. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* Report child + accumulated-thread CPU time to the parent. */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	/* Decode exit_code: 0x80 = dumped core, low 7 bits = killing signal. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1825
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		/* Group-stop state is per thread group; report as the leader. */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	/* si_status carries the stop/continue detail appropriate to @why. */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* Parent can suppress stop notifications with SIG_IGN/SA_NOCLDSTOP. */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1896
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001897static inline int may_ptrace_stop(void)
1898{
Tejun Heod21142e2011-06-17 16:50:34 +02001899 if (!likely(current->ptrace))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001900 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001901 /*
1902 * Are we in the middle of do_coredump?
1903 * If so and our tracer is also part of the coredump stopping
1904 * is a deadlock situation, and pointless because our tracer
1905 * is dead so don't allow us to stop.
1906 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001907 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001908 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01001909 *
1910 * This is almost outdated, a task with the pending SIGKILL can't
1911 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1912 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001913 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001914 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001915 unlikely(current->mm == current->parent->mm))
1916 return 0;
1917
1918 return 1;
1919}
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001922 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001923 * Called with the siglock held.
1924 */
1925static int sigkill_pending(struct task_struct *tsk)
1926{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001927 return sigismember(&tsk->pending.signal, SIGKILL) ||
1928 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001929}
1930
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		/* Sleep in TASK_TRACED until the tracer resumes us. */
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
2076
/*
 * Fill in a siginfo for a ptrace notification and stop for the tracer.
 *
 * @signr:	signal number reported in si_signo
 * @exit_code:	stored in si_code and handed to ptrace_stop() as the
 *		task's exit_code
 * @why:	CLD_* value describing why we are stopping
 *
 * Caller must hold current->sighand->siglock (see ptrace_notify());
 * ptrace_stop() drops it while sleeping and re-takes it before return.
 */
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
2090
/*
 * ptrace_notify - report a ptrace event to the tracer
 * @exit_code: event code; its low 7 bits must be SIGTRAP and the value
 *	       must fit in 16 bits (enforced by the BUG_ON below)
 *
 * Flushes any queued task_work, then stops the task with a
 * SIGTRAP/CLD_TRAPPED notification under ->siglock.
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2101
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		/* Start a fresh count of stopping threads, ourselves included. */
		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002225
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop in progress: this is an explicit
		 * SEIZE/INTERRUPT trap, reported as SIGTRAP.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		/* Legacy (non-SEIZED) group stop trap: no siginfo. */
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2259
/*
 * ptrace_signal - report a dequeued signal to the tracer
 * @signr: signal number just dequeued
 * @info:  siginfo of the dequeued signal; rewritten if the debugger
 *	   substitutes a different signal without PTRACE_SETSIGINFO
 *
 * Enters ptrace_stop() so the tracer can inspect, replace or cancel the
 * signal. Returns the (possibly changed) signal number to deliver, or 0
 * if the debugger cancelled it or the new signal is currently blocked
 * and was requeued. Called from get_signal() with ->siglock held.
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2307
/**
 * get_signal - pick the next signal to deliver to user-mode
 * @ksig: storage for the dequeued siginfo and the handler's k_sigaction
 *
 * Handles pending group stops, jobctl/ptrace traps and stop/continue
 * parent notifications, then dequeues pending signals one at a time.
 * Signals with a user handler are returned through @ksig; default
 * dispositions (ignore, stop, coredump, process exit) are carried out
 * right here, and the fatal ones never return.
 *
 * RETURNS:
 * Non-zero if @ksig holds a signal whose handler must now be set up by
 * the caller, 0 if there is nothing (more) to deliver.
 */
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2493
/**
 * signal_delivered - bookkeeping after a signal frame has been set up
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn. So we can
	   simply clear the restore sigmask flag. */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
2520
Al Viro2ce5da12012-11-07 15:11:25 -05002521void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2522{
2523 if (failed)
2524 force_sigsegv(ksig->sig, current);
2525 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002526 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002527}
2528
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 *
 * Walks the thread group and wakes any thread that can still handle
 * part of the remaining set. Callers hold @tsk->sighand->siglock
 * (see exit_signals() and __set_task_blocked()).
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* Stop early once every signal has found a taker. */
		if (sigisemptyset(&retarget))
			break;
	}
}
2560
/*
 * exit_signals - signal-side bookkeeping for a task entering exit
 * @tsk: the exiting task; PF_EXITING is set here
 *
 * Once PF_EXITING is set, @tsk no longer takes group-wide signals (see
 * wants_signal(), do_signal_stop()), so shared pending signals it might
 * have been picked to handle are retargeted to surviving threads, and
 * its share of a pending group stop is accounted for, notifying the
 * real parent if that completes the stop.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2610
/* Signal-handling primitives exported for use by loadable modules. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618
2619/*
2620 * System call entry points.
2621 */
2622
/**
 * sys_restart_syscall - restart a system call
 *
 * Re-dispatches to the restart function previously saved in
 * current->restart_block and returns its result.
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}
2631
/*
 * Restart-block stub for syscalls that must not be restarted:
 * always fails the restart attempt with -EINTR.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2636
/*
 * Install @newset as @tsk's blocked mask, first retargeting any shared
 * pending signals that are about to become blocked to other threads
 * (see retarget_shared_pending()).
 *
 * Caller must hold @tsk->sighand->siglock. @tsk is expected to be
 * current - note the mix of @tsk and current->blocked below.
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2648
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask; modified in place - SIGKILL and SIGSTOP are
 *	    stripped since they can never be blocked
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2661
/*
 * Like set_current_blocked() but installs @newset as-is, without
 * filtering SIGKILL/SIGSTOP out of it.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677
2678/*
2679 * This is also useful for kernel threads that want to temporarily
2680 * (or permanently) block certain signals.
2681 *
2682 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2683 * interface happily blocks "unblockable" signals like SIGKILL
2684 * and friends.
2685 */
2686int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2687{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002688 struct task_struct *tsk = current;
2689 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002691 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002692 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002693 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002694
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 switch (how) {
2696 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002697 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 break;
2699 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002700 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 break;
2702 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002703 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 break;
2705 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002706 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002708
Al Viro77097ae2012-04-27 13:58:59 -04002709 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002710 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711}
2712
Randy Dunlap41c57892011-04-04 15:00:26 -07002713/**
2714 * sys_rt_sigprocmask - change the list of currently blocked signals
2715 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07002716 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002717 * @oset: previous value of signal mask if non-null
2718 * @sigsetsize: size of sigset_t type
2719 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002720SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002721 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002724 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
2726 /* XXX: Don't preclude handling different sized sigset_t's. */
2727 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002728 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002730 old_set = current->blocked;
2731
2732 if (nset) {
2733 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2734 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2736
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002737 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002739 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002741
2742 if (oset) {
2743 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2744 return -EFAULT;
2745 }
2746
2747 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748}
2749
Al Viro322a56c2012-12-25 13:32:58 -05002750#ifdef CONFIG_COMPAT
Al Viro322a56c2012-12-25 13:32:58 -05002751COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2752 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753{
Al Viro322a56c2012-12-25 13:32:58 -05002754 sigset_t old_set = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
Al Viro322a56c2012-12-25 13:32:58 -05002756 /* XXX: Don't preclude handling different sized sigset_t's. */
2757 if (sigsetsize != sizeof(sigset_t))
2758 return -EINVAL;
2759
2760 if (nset) {
Al Viro322a56c2012-12-25 13:32:58 -05002761 sigset_t new_set;
2762 int error;
Al Viro3968cf62017-09-03 21:45:17 -04002763 if (get_compat_sigset(&new_set, nset))
Al Viro322a56c2012-12-25 13:32:58 -05002764 return -EFAULT;
Al Viro322a56c2012-12-25 13:32:58 -05002765 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2766
2767 error = sigprocmask(how, &new_set, NULL);
2768 if (error)
2769 return error;
2770 }
Dmitry V. Levinf4543222017-08-22 02:16:11 +03002771 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
Al Viro322a56c2012-12-25 13:32:58 -05002772}
2773#endif
Al Viro322a56c2012-12-25 13:32:58 -05002774
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002775static int do_sigpending(sigset_t *set)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 spin_lock_irq(&current->sighand->siglock);
Al Virofe9c1db2012-12-25 14:31:38 -05002778 sigorsets(set, &current->pending.signal,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 &current->signal->shared_pending.signal);
2780 spin_unlock_irq(&current->sighand->siglock);
2781
2782 /* Outside the lock because only this thread touches it. */
Al Virofe9c1db2012-12-25 14:31:38 -05002783 sigandsets(set, &current->blocked, set);
2784 return 0;
Randy Dunlap5aba0852011-04-04 14:59:31 -07002785}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786
Randy Dunlap41c57892011-04-04 15:00:26 -07002787/**
2788 * sys_rt_sigpending - examine a pending signal that has been raised
2789 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002790 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002791 * @sigsetsize: size of sigset_t type or larger
2792 */
Al Virofe9c1db2012-12-25 14:31:38 -05002793SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794{
Al Virofe9c1db2012-12-25 14:31:38 -05002795 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002796 int err;
2797
2798 if (sigsetsize > sizeof(*uset))
2799 return -EINVAL;
2800
2801 err = do_sigpending(&set);
Al Virofe9c1db2012-12-25 14:31:38 -05002802 if (!err && copy_to_user(uset, &set, sigsetsize))
2803 err = -EFAULT;
2804 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805}
2806
Al Virofe9c1db2012-12-25 14:31:38 -05002807#ifdef CONFIG_COMPAT
Al Virofe9c1db2012-12-25 14:31:38 -05002808COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2809 compat_size_t, sigsetsize)
2810{
Al Virofe9c1db2012-12-25 14:31:38 -05002811 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002812 int err;
2813
2814 if (sigsetsize > sizeof(*uset))
2815 return -EINVAL;
2816
2817 err = do_sigpending(&set);
Dmitry V. Levinf4543222017-08-22 02:16:11 +03002818 if (!err)
2819 err = put_compat_sigset(uset, &set, sigsetsize);
Al Virofe9c1db2012-12-25 14:31:38 -05002820 return err;
Al Virofe9c1db2012-12-25 14:31:38 -05002821}
2822#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002823
/*
 * Classify which member of the siginfo union carries the payload for
 * (sig, si_code).  The compat conversion helpers below use this to copy
 * struct siginfo to/from user space field by field.
 */
enum siginfo_layout siginfo_layout(int sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	/* Kernel-generated codes in (SI_USER, SI_KERNEL): layout depends on sig. */
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		/* Per-signal upper bound on valid si_code values and its layout. */
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL] = { NSIGILL, SIL_FAULT },
			[SIGFPE] = { NSIGFPE, SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS] = { NSIGBUS, SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT] = { NSIGEMT, SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
			[SIGSYS] = { NSIGSYS, SIL_SYS },
		};
		/* Signals absent from filter[] have limit == 0 and fall through. */
		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
			layout = filter[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
		}
		/* Codes up to NSIGPOLL on other signals carry a poll payload. */
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			/* Negative codes: queued from user space (sigqueue() etc). */
			layout = SIL_RT;
	}
	return layout;
}
2868
Al Viroce395962013-10-13 17:23:53 -04002869int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870{
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002871 if (copy_to_user(to, from , sizeof(struct siginfo)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 return -EFAULT;
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002873 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874}
2875
Eric W. Biederman212a36a2017-07-31 17:15:31 -05002876#ifdef CONFIG_COMPAT
/*
 * Convert a native siginfo to the 32-bit compat layout and copy it out,
 * choosing which union members to translate via siginfo_layout().
 *
 * On x86 (X32/IA32 emulation) copy_siginfo_to_user32() is only a thin
 * wrapper: the #if below splices a second entry point in front of the
 * shared body, so __copy_siginfo_to_user32() takes an explicit x32_ABI
 * flag while every other architecture compiles the body directly under
 * the copy_siginfo_to_user32() signature.
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct siginfo *from)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
{
	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct siginfo *from, bool x32_ABI)
#endif
{
	struct compat_siginfo new;
	/* Zero first so padding and unused union members don't leak stack. */
	memset(&new, 0, sizeof(new));

	new.si_signo = from->si_signo;
	new.si_errno = from->si_errno;
	new.si_code  = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		new.si_tid = from->si_tid;
		new.si_overrun = from->si_overrun;
		new.si_int = from->si_int;
		break;
	case SIL_POLL:
		new.si_band = from->si_band;
		new.si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_lower = ptr_to_compat(from->si_lower);
		new.si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_status = from->si_status;
		/* x32 uses 64-bit _utime/_stime; plain compat uses 32-bit. */
#ifdef CONFIG_X86_X32_ABI
		if (x32_ABI) {
			new._sifields._sigchld_x32._utime = from->si_utime;
			new._sifields._sigchld_x32._stime = from->si_stime;
		} else
#endif
		{
			new.si_utime = from->si_utime;
			new.si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_int = from->si_int;
		break;
	case SIL_SYS:
		new.si_call_addr = ptr_to_compat(from->si_call_addr);
		new.si_syscall = from->si_syscall;
		new.si_arch = from->si_arch;
		break;
	}

	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return 0;
}
2967
/*
 * Copy a 32-bit compat siginfo in from user space and widen it to the
 * native layout, translating only the union members that are valid for
 * this (si_signo, si_code) pair according to siginfo_layout().
 */
int copy_siginfo_from_user32(struct siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	/* Start from a clean native siginfo; untranslated fields stay zero. */
	clear_siginfo(to);
	to->si_signo = from.si_signo;
	to->si_errno = from.si_errno;
	to->si_code  = from.si_code;
	switch(siginfo_layout(from.si_signo, from.si_code)) {
	case SIL_KILL:
		to->si_pid = from.si_pid;
		to->si_uid = from.si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from.si_tid;
		to->si_overrun = from.si_overrun;
		to->si_int = from.si_int;
		break;
	case SIL_POLL:
		to->si_band = from.si_band;
		to->si_fd = from.si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_addr_lsb = from.si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_lower = compat_ptr(from.si_lower);
		to->si_upper = compat_ptr(from.si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_pkey = from.si_pkey;
		break;
	case SIL_CHLD:
		to->si_pid = from.si_pid;
		to->si_uid = from.si_uid;
		to->si_status = from.si_status;
		/* x32 callers supply 64-bit _utime/_stime in the x32 member. */
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from._sifields._sigchld_x32._utime;
			to->si_stime = from._sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from.si_utime;
			to->si_stime = from.si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from.si_pid;
		to->si_uid = from.si_uid;
		to->si_int = from.si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from.si_call_addr);
		to->si_syscall = from.si_syscall;
		to->si_arch = from.si_arch;
		break;
	}
	return 0;
}
3050#endif /* CONFIG_COMPAT */
3051
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 *
 * Returns the dequeued signal number on success, -EINVAL for an invalid
 * timespec, -EAGAIN if the timeout expired, or -EINTR if the sleep was
 * interrupted by a signal outside @which.
 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	/* Fast path: one of the wanted signals is already queued. */
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		/* ret == -EINTR if woken by a signal, 0 if the timer expired. */
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		/* Restore the original mask and retry the dequeue under the lock. */
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
3107
3108/**
Randy Dunlap41c57892011-04-04 15:00:26 -07003109 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3110 * in @uthese
3111 * @uthese: queued signals to wait for
3112 * @uinfo: if non-null, the signal's siginfo is returned here
3113 * @uts: upper bound on process time suspension
3114 * @sigsetsize: size of sigset_t type
3115 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003116SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3117 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3118 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 sigset_t these;
3121 struct timespec ts;
3122 siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02003123 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124
3125 /* XXX: Don't preclude handling different sized sigset_t's. */
3126 if (sigsetsize != sizeof(sigset_t))
3127 return -EINVAL;
3128
3129 if (copy_from_user(&these, uthese, sizeof(these)))
3130 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07003131
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 if (uts) {
3133 if (copy_from_user(&ts, uts, sizeof(ts)))
3134 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 }
3136
Oleg Nesterov943df142011-04-27 21:44:14 +02003137 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138
Oleg Nesterov943df142011-04-27 21:44:14 +02003139 if (ret > 0 && uinfo) {
3140 if (copy_siginfo_to_user(uinfo, &info))
3141 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 }
3143
3144 return ret;
3145}
3146
Al Viro1b3c8722017-05-31 04:46:17 -04003147#ifdef CONFIG_COMPAT
3148COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3149 struct compat_siginfo __user *, uinfo,
3150 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3151{
Al Viro1b3c8722017-05-31 04:46:17 -04003152 sigset_t s;
3153 struct timespec t;
3154 siginfo_t info;
3155 long ret;
3156
3157 if (sigsetsize != sizeof(sigset_t))
3158 return -EINVAL;
3159
Al Viro3968cf62017-09-03 21:45:17 -04003160 if (get_compat_sigset(&s, uthese))
Al Viro1b3c8722017-05-31 04:46:17 -04003161 return -EFAULT;
Al Viro1b3c8722017-05-31 04:46:17 -04003162
3163 if (uts) {
3164 if (compat_get_timespec(&t, uts))
3165 return -EFAULT;
3166 }
3167
3168 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3169
3170 if (ret > 0 && uinfo) {
3171 if (copy_siginfo_to_user32(uinfo, &info))
3172 ret = -EFAULT;
3173 }
3174
3175 return ret;
3176}
3177#endif
3178
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 *
 * Builds a SI_USER siginfo identifying the sender (tgid and uid as seen
 * in the caller's namespaces) and hands it to kill_something_info() to
 * resolve the destination encoded in @pid.
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;	/* sent by kill(), not generated by the kernel */
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
3197
/*
 * Send @sig with @info to the single task @pid.  If @tgid > 0 the task
 * must also belong to that thread group, otherwise -ESRCH is returned.
 * The lookup runs entirely under rcu_read_lock(), so no task reference
 * is taken.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
3227
/*
 * Common backend of tkill()/tgkill(): build a SI_TKILL siginfo naming
 * the sender and deliver it to the single task @pid.  A @tgid <= 0
 * skips the thread-group check in do_send_specific().
 */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
3241
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242/**
3243 * sys_tgkill - send signal to one specific thread
3244 * @tgid: the thread group ID of the thread
3245 * @pid: the PID of the thread
3246 * @sig: signal to be sent
3247 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003248 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 * exists but it's not belonging to the target process anymore. This
3250 * method solves the problem of threads exiting and PIDs getting reused.
3251 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003252SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 /* This is only valid for single tasks */
3255 if (pid <= 0 || tgid <= 0)
3256 return -EINVAL;
3257
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003258 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259}
3260
Randy Dunlap41c57892011-04-04 15:00:26 -07003261/**
3262 * sys_tkill - send signal to one specific task
3263 * @pid: the PID of the task
3264 * @sig: signal to be sent
3265 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3267 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003268SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270 /* This is only valid for single tasks */
3271 if (pid <= 0)
3272 return -EINVAL;
3273
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003274 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275}
3276
Al Viro75907d42012-12-25 15:19:12 -05003277static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3278{
3279 /* Not even root can pretend to send signals from the kernel.
3280 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3281 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003282 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003283 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003284 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003285
Al Viro75907d42012-12-25 15:19:12 -05003286 info->si_signo = sig;
3287
3288 /* POSIX.1b doesn't mention process groups. */
3289 return kill_proc_info(sig, info, pid);
3290}
3291
/**
 * sys_rt_sigqueueinfo - send signal information to a signal
 * @pid: the PID of the thread
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	/* Copy in verbatim; do_rt_sigqueueinfo() validates si_code. */
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3306
Al Viro75907d42012-12-25 15:19:12 -05003307#ifdef CONFIG_COMPAT
/* Compat entry: widen the 32-bit siginfo, then share the native path. */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3319#endif
Al Viro75907d42012-12-25 15:19:12 -05003320
/*
 * Like do_rt_sigqueueinfo(), but targets one specific thread @pid in
 * thread group @tgid (both must be positive).
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3338
/* Queue a caller-supplied siginfo to one thread of a thread group. */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	/* Copy in verbatim; do_rt_tgsigqueueinfo() validates si_code. */
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3349
Al Viro9aae8fc2012-12-24 23:12:04 -05003350#ifdef CONFIG_COMPAT
/* Compat entry: widen the 32-bit siginfo, then share the native path. */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3363#endif
3364
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 *
 * Installs @action as the handler for @sig under the siglock.  When the
 * action is SIG_IGN, already-queued instances of @sig are discarded from
 * both the shared and the private pending queues (matching the POSIX
 * rule that ignoring a signal discards pending instances).
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003385
/*
 * Weak default: no adjustment.  Architectures may override this to
 * tweak a k_sigaction for ABI reasons (presumably compat tasks — see
 * the arch overrides for the actual semantics).
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3390
/*
 * Install a new signal action and/or report the old one, the kernel
 * side of sigaction(2).  SIGKILL and SIGSTOP (sig_kernel_only) may be
 * queried but never changed.  All state changes happen under the
 * shared ->sighand->siglock.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Give the architecture a chance to adjust act/oact (ABI quirks). */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* A handler can never run with SIGKILL/SIGSTOP blocked. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Discard from the shared queue and every thread's queue. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3435
/*
 * Query and/or change the alternate signal stack, the kernel side of
 * sigaltstack(2).  @sp is the caller's current user stack pointer, used
 * both to report whether we are on the alt stack and to forbid changing
 * it while in use.  Either of @ss/@oss may be NULL.
 */
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
	struct task_struct *t = current;

	if (oss) {
		/* Zero-fill so no kernel stack padding is handed back. */
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		/* on-stack status plus the stored mode bits (SS_FLAG_BITS). */
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		/* Cannot modify the alt stack while executing on it. */
		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			/* Disabling clears the recorded stack entirely. */
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < MINSIGSTKSZ))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
Al Virobcfe8ad2017-05-27 00:29:34 -04003477
Al Viro6bf9adf2012-12-14 14:09:47 -05003478SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3479{
Al Virobcfe8ad2017-05-27 00:29:34 -04003480 stack_t new, old;
3481 int err;
3482 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3483 return -EFAULT;
3484 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3485 current_user_stack_pointer());
3486 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3487 err = -EFAULT;
3488 return err;
Al Viro6bf9adf2012-12-14 14:09:47 -05003489}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490
Al Viro5c495742012-11-18 15:29:16 -05003491int restore_altstack(const stack_t __user *uss)
3492{
Al Virobcfe8ad2017-05-27 00:29:34 -04003493 stack_t new;
3494 if (copy_from_user(&new, uss, sizeof(stack_t)))
3495 return -EFAULT;
3496 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
Al Viro5c495742012-11-18 15:29:16 -05003497 /* squash all but EFAULT for now */
Al Virobcfe8ad2017-05-27 00:29:34 -04003498 return 0;
Al Viro5c495742012-11-18 15:29:16 -05003499}
3500
Al Viroc40702c2012-11-20 14:24:26 -05003501int __save_altstack(stack_t __user *uss, unsigned long sp)
3502{
3503 struct task_struct *t = current;
Stas Sergeev2a742132016-04-14 23:20:04 +03003504 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3505 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003506 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev2a742132016-04-14 23:20:04 +03003507 if (err)
3508 return err;
3509 if (t->sas_ss_flags & SS_AUTODISARM)
3510 sas_ss_reset(t);
3511 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003512}
3513
Al Viro90268432012-12-14 14:47:53 -05003514#ifdef CONFIG_COMPAT
Dominik Brodowski6203deb2018-03-17 17:11:51 +01003515static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3516 compat_stack_t __user *uoss_ptr)
Al Viro90268432012-12-14 14:47:53 -05003517{
3518 stack_t uss, uoss;
3519 int ret;
Al Viro90268432012-12-14 14:47:53 -05003520
3521 if (uss_ptr) {
3522 compat_stack_t uss32;
Al Viro90268432012-12-14 14:47:53 -05003523 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3524 return -EFAULT;
3525 uss.ss_sp = compat_ptr(uss32.ss_sp);
3526 uss.ss_flags = uss32.ss_flags;
3527 uss.ss_size = uss32.ss_size;
3528 }
Al Virobcfe8ad2017-05-27 00:29:34 -04003529 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
Al Viro90268432012-12-14 14:47:53 -05003530 compat_user_stack_pointer());
Al Viro90268432012-12-14 14:47:53 -05003531 if (ret >= 0 && uoss_ptr) {
Al Virobcfe8ad2017-05-27 00:29:34 -04003532 compat_stack_t old;
3533 memset(&old, 0, sizeof(old));
3534 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3535 old.ss_flags = uoss.ss_flags;
3536 old.ss_size = uoss.ss_size;
3537 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
Al Viro90268432012-12-14 14:47:53 -05003538 ret = -EFAULT;
3539 }
3540 return ret;
3541}
3542
/* 32-bit compat entry point for sigaltstack(2); all work is done in
 * do_compat_sigaltstack(). */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}
3549
Al Viro90268432012-12-14 14:47:53 -05003550int compat_restore_altstack(const compat_stack_t __user *uss)
3551{
Dominik Brodowski6203deb2018-03-17 17:11:51 +01003552 int err = do_compat_sigaltstack(uss, NULL);
Al Viro90268432012-12-14 14:47:53 -05003553 /* squash all but -EFAULT for now */
3554 return err == -EFAULT ? err : 0;
3555}
Al Viroc40702c2012-11-20 14:24:26 -05003556
3557int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3558{
Stas Sergeev441398d2017-02-27 14:27:25 -08003559 int err;
Al Viroc40702c2012-11-20 14:24:26 -05003560 struct task_struct *t = current;
Stas Sergeev441398d2017-02-27 14:27:25 -08003561 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3562 &uss->ss_sp) |
3563 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003564 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev441398d2017-02-27 14:27:25 -08003565 if (err)
3566 return err;
3567 if (t->sas_ss_flags & SS_AUTODISARM)
3568 sas_ss_reset(t);
3569 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003570}
Al Viro90268432012-12-14 14:47:53 -05003571#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572
3573#ifdef __ARCH_WANT_SYS_SIGPENDING
3574
Randy Dunlap41c57892011-04-04 15:00:26 -07003575/**
3576 * sys_sigpending - examine pending signals
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003577 * @uset: where mask of pending signal is returned
Randy Dunlap41c57892011-04-04 15:00:26 -07003578 */
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003579SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580{
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003581 sigset_t set;
3582 int err;
3583
3584 if (sizeof(old_sigset_t) > sizeof(*uset))
3585 return -EINVAL;
3586
3587 err = do_sigpending(&set);
3588 if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
3589 err = -EFAULT;
3590 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591}
3592
Al Viro8f136212017-05-31 04:42:07 -04003593#ifdef CONFIG_COMPAT
3594COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3595{
3596 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03003597 int err = do_sigpending(&set);
Dmitry V. Levinfbb77612017-08-05 23:00:50 +03003598 if (!err)
3599 err = put_user(set.sig[0], set32);
Al Viro8f136212017-05-31 04:42:07 -04003600 return err;
3601}
3602#endif
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604#endif
3605
3606#ifdef __ARCH_WANT_SYS_SIGPROCMASK
Randy Dunlap41c57892011-04-04 15:00:26 -07003607/**
3608 * sys_sigprocmask - examine and change blocked signals
3609 * @how: whether to add, remove, or set signals
Oleg Nesterovb013c392011-04-28 11:36:20 +02003610 * @nset: signals to add or remove (if non-null)
Randy Dunlap41c57892011-04-04 15:00:26 -07003611 * @oset: previous value of signal mask if non-null
3612 *
Randy Dunlap5aba0852011-04-04 14:59:31 -07003613 * Some platforms have their own version with special arguments;
3614 * others support only sys_rt_sigprocmask.
3615 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
Oleg Nesterovb013c392011-04-28 11:36:20 +02003617SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
Heiko Carstensb290ebe2009-01-14 14:14:06 +01003618 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 old_sigset_t old_set, new_set;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003621 sigset_t new_blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
Oleg Nesterovb013c392011-04-28 11:36:20 +02003623 old_set = current->blocked.sig[0];
3624
3625 if (nset) {
3626 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3627 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003629 new_blocked = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 switch (how) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 case SIG_BLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003633 sigaddsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 break;
3635 case SIG_UNBLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003636 sigdelsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 break;
3638 case SIG_SETMASK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003639 new_blocked.sig[0] = new_set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 break;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003641 default:
3642 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 }
3644
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01003645 set_current_blocked(&new_blocked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646 }
Oleg Nesterovb013c392011-04-28 11:36:20 +02003647
3648 if (oset) {
3649 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3650 return -EFAULT;
3651 }
3652
3653 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654}
3655#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3656
Al Viroeaca6ea2012-11-25 23:12:10 -05003657#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003658/**
3659 * sys_rt_sigaction - alter an action taken by a process
3660 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003661 * @act: new sigaction
3662 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003663 * @sigsetsize: size of sigset_t type
3664 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003665SYSCALL_DEFINE4(rt_sigaction, int, sig,
3666 const struct sigaction __user *, act,
3667 struct sigaction __user *, oact,
3668 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669{
3670 struct k_sigaction new_sa, old_sa;
3671 int ret = -EINVAL;
3672
3673 /* XXX: Don't preclude handling different sized sigset_t's. */
3674 if (sigsetsize != sizeof(sigset_t))
3675 goto out;
3676
3677 if (act) {
3678 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3679 return -EFAULT;
3680 }
3681
3682 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3683
3684 if (!ret && oact) {
3685 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3686 return -EFAULT;
3687 }
3688out:
3689 return ret;
3690}
Al Viro08d32fe2012-12-25 18:38:15 -05003691#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigaction(2): converts a 32-bit
 * struct compat_sigaction to/from the native k_sigaction around
 * do_sigaction().  User accesses are OR-accumulated into @ret so a
 * single fault check covers the whole sequence.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* Widen the 32-bit handler/restorer pointers. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Narrow the old action back into the 32-bit layout. */
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3735#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003736#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
Al Viro495dfbf2012-12-25 19:09:45 -05003738#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2) using struct old_sigaction (one-word mask).
 * Validates each user range once with access_ok(), then uses the
 * unchecked __get_user()/__put_user() accessors; any failure in the
 * OR-chain yields -EFAULT.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Expand the single-word legacy mask into a full sigset. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the first word of the old mask fits the old ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3773#endif
3774#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * 32-bit compat variant of the legacy sigaction(2): same access_ok() +
 * __get_user()/__put_user() OR-chain pattern as the native version,
 * with handler/restorer pointers widened via compat_ptr().
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* Expand the one-word compat mask into a full sigset. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Narrow pointers and mask back to the 32-bit old ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3814#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815
Fabian Frederickf6187762014-06-04 16:11:12 -07003816#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 *
 * sgetmask(2): returns the low word of the caller's blocked-signal mask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
3826
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003827SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003829 int old = current->blocked.sig[0];
3830 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003832 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003833 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834
3835 return old;
3836}
Fabian Frederickf6187762014-06-04 16:11:12 -07003837#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
3839#ifdef __ARCH_WANT_SYS_SIGNAL
3840/*
3841 * For backwards compatibility. Functionality superseded by sigaction.
3842 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003843SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844{
3845 struct k_sigaction new_sa, old_sa;
3846 int ret;
3847
3848 new_sa.sa.sa_handler = handler;
3849 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003850 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851
3852 ret = do_sigaction(sig, &new_sa, &old_sa);
3853
3854 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3855}
3856#endif /* __ARCH_WANT_SYS_SIGNAL */
3857
3858#ifdef __ARCH_WANT_SYS_PAUSE
3859
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003860SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003862 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003863 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003864 schedule();
3865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866 return -ERESTARTNOHAND;
3867}
3868
3869#endif
3870
Richard Weinberger9d8a7652015-11-20 15:57:21 -08003871static int sigsuspend(sigset_t *set)
Al Viro68f3f162012-05-21 21:42:32 -04003872{
Al Viro68f3f162012-05-21 21:42:32 -04003873 current->saved_sigmask = current->blocked;
3874 set_current_blocked(set);
3875
Sasha Levin823dd322016-02-05 15:36:05 -08003876 while (!signal_pending(current)) {
3877 __set_current_state(TASK_INTERRUPTIBLE);
3878 schedule();
3879 }
Al Viro68f3f162012-05-21 21:42:32 -04003880 set_restore_sigmask();
3881 return -ERESTARTNOHAND;
3882}
Al Viro68f3f162012-05-21 21:42:32 -04003883
Randy Dunlap41c57892011-04-04 15:00:26 -07003884/**
3885 * sys_rt_sigsuspend - replace the signal mask for a value with the
3886 * @unewset value until a signal is received
3887 * @unewset: new signal mask value
3888 * @sigsetsize: size of sigset_t type
3889 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003890SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003891{
3892 sigset_t newset;
3893
3894 /* XXX: Don't preclude handling different sized sigset_t's. */
3895 if (sigsetsize != sizeof(sigset_t))
3896 return -EINVAL;
3897
3898 if (copy_from_user(&newset, unewset, sizeof(newset)))
3899 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003900 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003901}
Al Viroad4b65a2012-12-24 21:43:56 -05003902
3903#ifdef CONFIG_COMPAT
3904COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3905{
Al Viroad4b65a2012-12-24 21:43:56 -05003906 sigset_t newset;
Al Viroad4b65a2012-12-24 21:43:56 -05003907
3908 /* XXX: Don't preclude handling different sized sigset_t's. */
3909 if (sigsetsize != sizeof(sigset_t))
3910 return -EINVAL;
3911
Al Viro3968cf62017-09-03 21:45:17 -04003912 if (get_compat_sigset(&newset, unewset))
Al Viroad4b65a2012-12-24 21:43:56 -05003913 return -EFAULT;
Al Viroad4b65a2012-12-24 21:43:56 -05003914 return sigsuspend(&newset);
Al Viroad4b65a2012-12-24 21:43:56 -05003915}
3916#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003917
Al Viro0a0e8cd2012-12-25 16:04:12 -05003918#ifdef CONFIG_OLD_SIGSUSPEND
3919SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3920{
3921 sigset_t blocked;
3922 siginitset(&blocked, mask);
3923 return sigsuspend(&blocked);
3924}
3925#endif
3926#ifdef CONFIG_OLD_SIGSUSPEND3
3927SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3928{
3929 sigset_t blocked;
3930 siginitset(&blocked, mask);
3931 return sigsuspend(&blocked);
3932}
3933#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934
/* Weak default for naming special VMAs; architectures may override it. */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3939
/*
 * Boot-time initialisation of the signal subsystem: verify siginfo
 * layout invariants at compile time and create the sigqueue slab cache.
 */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* SLAB_PANIC: fail hard at boot if the cache cannot be created. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003949
3950#ifdef CONFIG_KGDB_KDB
3951#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	/* Remembers the target of the previous kill attempt (see below). */
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	/* Only trylock: kdb may interrupt code already holding siglock. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	/* new_t: is this the first attempt against this particular task? */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		/*
		 * Warn and bail on the first attempt only; reissuing the
		 * same kill command skips this check so the operator can
		 * knowingly accept the run-queue-lock deadlock risk.
		 */
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3988#endif /* CONFIG_KGDB_KDB */