blob: 99e91163c9a3c55fd3e60f73c89c98148316ce24 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010016#include <linux/sched/mm.h>
Ingo Molnar8703e8a2017-02-08 18:51:30 +010017#include <linux/sched/user.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010018#include <linux/sched/debug.h>
Ingo Molnar29930022017-02-08 18:51:36 +010019#include <linux/sched/task.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010020#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010021#include <linux/sched/cputime.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/fs.h>
23#include <linux/tty.h>
24#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070025#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/security.h>
27#include <linux/syscalls.h>
28#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070029#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070030#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090031#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070032#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080033#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080034#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080035#include <linux/pid_namespace.h>
36#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080037#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053038#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050039#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000040#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070041#include <linux/compiler.h>
Christoph Hellwig31ea70e2017-06-03 21:01:00 +020042#include <linux/posix-timers.h>
Miroslav Benes43347d52017-11-15 14:50:13 +010043#include <linux/livepatch.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070044
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050045#define CREATE_TRACE_POINTS
46#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/param.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unistd.h>
51#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010052#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040053#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55/*
56 * SLAB caches for signal bits.
57 */
58
Christoph Lametere18b8902006-12-06 20:33:20 -080059static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090061int print_fatal_signals __read_mostly;
62
Roland McGrath35de2542008-07-25 19:45:51 -070063static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070064{
Roland McGrath35de2542008-07-25 19:45:51 -070065 return t->sighand->action[sig - 1].sa.sa_handler;
66}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070067
Christian Braunere4a8b4e2018-08-21 22:00:15 -070068static inline bool sig_handler_ignored(void __user *handler, int sig)
Roland McGrath35de2542008-07-25 19:45:51 -070069{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070070 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070071 return handler == SIG_IGN ||
Christian Braunere4a8b4e2018-08-21 22:00:15 -070072 (handler == SIG_DFL && sig_kernel_ignore(sig));
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070073}
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
/*
 * Would @sig be ignored by @t based on its handler and task state alone
 * (blocking and ptrace are checked by the caller, sig_ignored())?
 * @force overrides SIGNAL_UNKILLABLE protection for fatal-only signals.
 */
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	/*
	 * An unkillable (container-init style) task with the default
	 * handler ignores the signal, unless it is forced and fatal.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}
91
/*
 * Would @sig be discarded if sent to @t right now?  @force is passed
 * through to sig_task_ignored() to bypass SIGNAL_UNKILLABLE for
 * fatal-only signals.
 */
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
112
113/*
114 * Re-calculate pending state from the set of locally pending
115 * signals, globally pending signals, and blocked signals.
116 */
Christian Brauner938696a2018-08-21 22:00:27 -0700117static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118{
119 unsigned long ready;
120 long i;
121
122 switch (_NSIG_WORDS) {
123 default:
124 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
125 ready |= signal->sig[i] &~ blocked->sig[i];
126 break;
127
128 case 4: ready = signal->sig[3] &~ blocked->sig[3];
129 ready |= signal->sig[2] &~ blocked->sig[2];
130 ready |= signal->sig[1] &~ blocked->sig[1];
131 ready |= signal->sig[0] &~ blocked->sig[0];
132 break;
133
134 case 2: ready = signal->sig[1] &~ blocked->sig[1];
135 ready |= signal->sig[0] &~ blocked->sig[0];
136 break;
137
138 case 1: ready = signal->sig[0] &~ blocked->sig[0];
139 }
140 return ready != 0;
141}
142
/* True if any signal in pending set @p is deliverable, i.e. not masked by @b. */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
144
/*
 * Recompute TIF_SIGPENDING for @t.  Sets the flag (and returns true)
 * when job-control work or an unblocked signal is pending.  The flag is
 * deliberately never cleared here -- see the comment below.
 */
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}
161
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
171
/*
 * Recompute TIF_SIGPENDING for current, clearing it only when nothing is
 * pending and neither the freezer nor a pending livepatch transition
 * needs the task to take the signal-delivery slow path.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);

}
179
/*
 * Establish TIF_SIGPENDING for a freshly forked current: the flag is set
 * unconditionally under siglock, then recalc_sigpending() decides whether
 * it may legitimately be cleared.
 */
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
190
/* Given the mask, find the first available signal that should be serviced. */

/*
 * Synchronous (fault-generated) signals that next_signal() dequeues
 * ahead of everything else in the first sigset word.
 */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800196
/*
 * Return the lowest-numbered signal in @pending that is not masked by
 * @mask, giving priority to synchronous (fault) signals in the first
 * word; 0 if none is deliverable.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Scan the remaining words; unrolled for the common _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
242
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900243static inline void print_dropped_signal(int sig)
244{
245 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
246
247 if (!print_fatal_signals)
248 return;
249
250 if (!__ratelimit(&ratelimit_state))
251 return;
252
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700253 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900254 current->comm, current->pid, sig);
255}
256
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* a new stop signo replaces, never accumulates with, the old one */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
289
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		/* order the clear before the waiter's re-check */
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
310
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
338
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* sample CONSUME before task_clear_jobctl_pending() wipes it */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
380
/*
 * Make newly created thread @task join the group stop that current (the
 * forking thread) is part of.  Note: the jobctl state is deliberately
 * read from current, not @task -- the child inherits the parent's stop.
 */
void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		/* only count the child if it actually took the pending bits */
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
394
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * On success the returned sigqueue owns a reference on @t's user_struct
 * (dropped by __sigqueue_free()).  Returns NULL if the cache allocation
 * fails or the user is over RLIMIT_SIGPENDING and !@override_rlimit.
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	/* charge the pending signal before we know the alloc succeeds */
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* over the limit or allocation failed: undo the accounting */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;	/* keeps the uid reference taken above */
	}

	return q;
}
434
Andrew Morton514a01b2006-02-03 03:04:41 -0800435static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436{
437 if (q->flags & SIGQUEUE_PREALLOC)
438 return;
439 atomic_dec(&q->user->sigpending);
440 free_uid(q->user);
441 kmem_cache_free(sigqueue_cachep, q);
442}
443
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800444void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445{
446 struct sigqueue *q;
447
448 sigemptyset(&queue->signal);
449 while (!list_empty(&queue->list)) {
450 q = list_entry(queue->list.next, struct sigqueue , list);
451 list_del_init(&q->list);
452 __sigqueue_free(q);
453 }
454}
455
/*
 * Flush all pending signals for this kthread.
 *
 * Clears TIF_SIGPENDING and discards both the private and the shared
 * pending queues under siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
469
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500470#ifdef CONFIG_POSIX_TIMERS
/*
 * Drop only the SI_TIMER entries from @pending while preserving every
 * other queued signal and its pending bit.  A signal number stays set
 * in the bitset if at least one non-timer entry for it remains queued.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* non-timer entry: its signal must stay pending */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* merge what survived the scan back into the pending bitset */
	sigorsets(&pending->signal, &signal, &retain);
}
493
/* Flush itimer (SI_TIMER) signals from current's private and shared queues. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500504#endif
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400505
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700506void ignore_signals(struct task_struct *t)
507{
508 int i;
509
510 for (i = 0; i < _NSIG; ++i)
511 t->sighand->action[i].sa.sa_handler = SIG_IGN;
512
513 flush_signals(t);
514}
515
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517 * Flush all handlers for a task.
518 */
519
520void
521flush_signal_handlers(struct task_struct *t, int force_default)
522{
523 int i;
524 struct k_sigaction *ka = &t->sighand->action[0];
525 for (i = _NSIG ; i != 0 ; i--) {
526 if (force_default || ka->sa.sa_handler != SIG_IGN)
527 ka->sa.sa_handler = SIG_DFL;
528 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700529#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700530 ka->sa.sa_restorer = NULL;
531#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 sigemptyset(&ka->sa.sa_mask);
533 ka++;
534 }
535}
536
/*
 * Will @sig go unhandled by @tsk, i.e. no user handler is installed?
 * Global init always counts as unhandled; a ptraced task defers the
 * answer to its tracer.
 */
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
549
/*
 * Remove one queued instance of @sig from @list, copying its siginfo
 * into @info.  Sets *@resched_timer when the dequeued entry belongs to
 * a preallocated posix-timer sigqueue that must be re-armed.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			/*
			 * A second instance exists: jump past sigdelset()
			 * so the pending bit for @sig stays set.
			 */
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
594
/*
 * Pop the lowest deliverable signal from @pending into @info, flagging
 * in *@resched_timer whether a posix timer must be re-armed.  Returns
 * the signal number, or 0 when nothing is deliverable.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
604
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
687
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
712
713/*
714 * Remove signals in mask from the pending set and queue.
715 * Returns 1 if any signals were found.
716 *
717 * All callers must be holding the siglock.
George Anzinger71fabd5e2006-01-08 01:02:48 -0800718 */
Christian Brauner8f113512018-08-21 22:00:38 -0700719static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
George Anzinger71fabd5e2006-01-08 01:02:48 -0800720{
721 struct sigqueue *q, *n;
722 sigset_t m;
723
724 sigandsets(&m, mask, &s->signal);
725 if (sigisemptyset(&m))
Christian Brauner8f113512018-08-21 22:00:38 -0700726 return;
George Anzinger71fabd5e2006-01-08 01:02:48 -0800727
Oleg Nesterov702a5072011-04-27 22:01:27 +0200728 sigandnsets(&s->signal, &s->signal, mask);
George Anzinger71fabd5e2006-01-08 01:02:48 -0800729 list_for_each_entry_safe(q, n, &s->list, list) {
730 if (sigismember(mask, q->info.si_signo)) {
731 list_del_init(&q->list);
732 __sigqueue_free(q);
733 }
734 }
George Anzinger71fabd5e2006-01-08 01:02:48 -0800735}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
static inline int is_si_special(const struct siginfo *info)
{
	/*
	 * SEND_SIG_NOINFO/SEND_SIG_PRIV are tiny magic pointer constants,
	 * so anything <= SEND_SIG_PRIV is a special marker rather than a
	 * pointer to real siginfo.
	 */
	return info <= SEND_SIG_PRIV;
}
741
742static inline bool si_fromuser(const struct siginfo *info)
743{
744 return info == SEND_SIG_NOINFO ||
745 (!is_si_special(info) && SI_FROMUSER(info));
746}
747
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700749 * called with RCU read lock from check_kill_permission()
750 */
Christian Brauner2a9b9092018-08-21 22:00:11 -0700751static bool kill_ok_by_cred(struct task_struct *t)
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700752{
753 const struct cred *cred = current_cred();
754 const struct cred *tcred = __task_cred(t);
755
Christian Brauner2a9b9092018-08-21 22:00:11 -0700756 return uid_eq(cred->euid, tcred->suid) ||
757 uid_eq(cred->euid, tcred->uid) ||
758 uid_eq(cred->uid, tcred->suid) ||
759 uid_eq(cred->uid, tcred->uid) ||
760 ns_capable(tcred->user_ns, CAP_KILL);
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700761}
762
763/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 * Bad permissions for sending the signal
David Howells694f6902010-08-04 16:59:14 +0100765 * - the caller must hold the RCU read lock
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-internal sends bypass the permission checks entirely. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through - SIGCONT across sessions is denied */
		default:
			return -EPERM;
		}
	}

	/* Give the LSM a final veto. */
	return security_task_kill(t, info, sig, NULL);
}
801
Tejun Heofb1d9102011-06-14 11:20:17 +0200802/**
803 * ptrace_trap_notify - schedule trap to notify ptracer
804 * @t: tracee wanting to notify tracer
805 *
806 * This function schedules sticky ptrace trap which is cleared on the next
807 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
808 * ptracer.
809 *
Tejun Heo544b2c92011-06-14 11:20:18 +0200810 * If @t is running, STOP trap will be taken. If trapped for STOP and
811 * ptracer is listening for events, tracee is woken up so that it can
812 * re-trap for the new event. If trapped otherwise, STOP trap will be
813 * eventually taken without returning to userland after the existing traps
814 * are finished by PTRACE_CONT.
Tejun Heofb1d9102011-06-14 11:20:17 +0200815 *
816 * CONTEXT:
817 * Must be called with @task->sighand->siglock held.
818 */
static void ptrace_trap_notify(struct task_struct *t)
{
	/* Only valid for PTRACE_SEIZE'd tracees, with siglock held. */
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* Wake the tracee; interrupt an existing STOP trap only if the ptracer is listening. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
827
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828/*
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700829 * Handle magic process-wide effects of stop/continue signals. Unlike
830 * the signal actions, these happen immediately at signal-generation
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 * time regardless of blocking, ignoring, or handling. This does the
832 * actual continuing for SIGCONT, but not the actual stopping for stop
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700833 * signals. The process stop is done as a signal action for SIG_DFL.
834 *
835 * Returns true if the signal should be actually delivered, otherwise
836 * it should be dropped.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* Coredump without group exit: only SIGKILL gets through. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* SEIZED tracees re-trap to notify the ptracer instead of resuming directly. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* Deliver only if the signal isn't ignored (or delivery is forced). */
	return !sig_ignored(p, sig, force);
}
902
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700903/*
904 * Test if P wants to take SIG. After we've checked all threads with this,
905 * it's equivalent to finding no threads not blocking SIG. Any threads not
906 * blocking SIG were ruled out because they are not running and already
907 * have pending signals. Such threads will dequeue from the shared queue
908 * as soon as they're available, so putting the signal on the shared queue
909 * will be equivalent to sending it to one such thread.
910 */
Christian Brauneracd14e62018-08-21 22:00:42 -0700911static inline bool wants_signal(int sig, struct task_struct *p)
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700912{
913 if (sigismember(&p->blocked, sig))
Christian Brauneracd14e62018-08-21 22:00:42 -0700914 return false;
915
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700916 if (p->flags & PF_EXITING)
Christian Brauneracd14e62018-08-21 22:00:42 -0700917 return false;
918
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700919 if (sig == SIGKILL)
Christian Brauneracd14e62018-08-21 22:00:42 -0700920 return true;
921
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700922 if (task_is_stopped_or_traced(p))
Christian Brauneracd14e62018-08-21 22:00:42 -0700923 return false;
924
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700925 return task_curr(p) || !signal_pending(p);
926}
927
/*
 * Pick a thread to take @sig, which is already queued on @p, and wake it.
 * If the signal will be fatal to the group (and no coredump is needed),
 * start the whole group exiting immediately instead.
 */
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so the load is spread round-robin.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1003
Christian Braunera19e2c02018-08-21 22:00:46 -07001004static inline bool legacy_queue(struct sigpending *signals, int sig)
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001005{
1006 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1007}
1008
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001009#ifdef CONFIG_USER_NS
1010static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1011{
1012 if (current_user_ns() == task_cred_xxx(t, user_ns))
1013 return;
1014
1015 if (SI_FROMKERNEL(info))
1016 return;
1017
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001018 rcu_read_lock();
1019 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1020 make_kuid(current_user_ns(), info->si_uid));
1021 rcu_read_unlock();
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001022}
1023#else
/* Without CONFIG_USER_NS there is only one user namespace: nothing to fix up. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
1028#endif
1029
/*
 * Queue @sig with @info on @t: on the per-thread queue for PIDTYPE_PID,
 * otherwise on the group's shared pending queue, then pick and wake a
 * thread via complete_signal().  Returns 0 on success or -EAGAIN on
 * rt-signal queue overflow.  Caller must hold t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL SIGSTOP,
	 * and kernel threads.
	 */
	if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Fill in siginfo depending on which special marker (if any) was passed. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* A pid from an ancestor pid namespace has no valid value here. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
1149
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/*
	 * Nonzero when a user-space signal comes from an ancestor pid
	 * namespace, i.e. current has no pid in the target's active
	 * pid namespace.
	 */
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
1162
/* Log a register dump (and, on native i386, code bytes) for a fatal signal. */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	/* On native i386, also dump the first 16 opcode bytes at regs->ip. */
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop at the first unreadable byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* Dump the registers with preemption disabled. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1186
1187static int __init setup_print_fatal_signals(char *str)
1188{
1189 get_option (&str, &print_fatal_signals);
1190
1191 return 1;
1192}
1193
1194__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
/*
 * Send a signal to a whole thread group (shared pending queue).
 * Caller must hold the target's siglock (__send_signal asserts it).
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
1201
/*
 * Like send_signal(), but takes and drops the target's sighand lock
 * itself.  Returns -ESRCH if the task's sighand is already gone.
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
1215
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216/*
1217 * Force a signal that the process can't ignore: if necessary
1218 * we unblock the signal and change any SIG_IGN to SIG_DFL.
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001219 *
1220 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1221 * since we do not want to have a signal handler that was blocked
1222 * be invoked when user space had explicitly blocked it.
1223 *
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001224 * We don't want to have recursive SIGSEGV's etc, for example,
1225 * that is why we also clear SIGNAL_UNKILLABLE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227int
1228force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1229{
1230 unsigned long int flags;
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001231 int ret, blocked, ignored;
1232 struct k_sigaction *action;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
1234 spin_lock_irqsave(&t->sighand->siglock, flags);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001235 action = &t->sighand->action[sig-1];
1236 ignored = action->sa.sa_handler == SIG_IGN;
1237 blocked = sigismember(&t->blocked, sig);
1238 if (blocked || ignored) {
1239 action->sa.sa_handler = SIG_DFL;
1240 if (blocked) {
1241 sigdelset(&t->blocked, sig);
Roland McGrath7bb44ad2007-05-23 13:57:44 -07001242 recalc_sigpending_and_wake(t);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 }
Jamie Ileseb61b592017-08-18 15:16:18 -07001245 /*
1246 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1247 * debugging to leave init killable.
1248 */
1249 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001250 t->signal->flags &= ~SIGNAL_UNKILLABLE;
Eric W. Biedermanb21c5bd2018-07-21 11:34:03 -05001251 ret = send_signal(sig, info, t, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1253
1254 return ret;
1255}
1256
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257/*
1258 * Nuke all other threads in the group.
1259 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;	/* number of other (non-@p) threads visited */

	p->signal->group_stop_count = 0;

	/* Walk every thread in the group except @p itself. */
	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1280
/*
 * Safely acquire tsk->sighand->siglock: the sighand may be freed or
 * replaced concurrently, so retry until the pointer is stable while
 * the lock is held.  Returns the locked sighand, or NULL if the task
 * no longer has one; on success *flags holds the saved irq state.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1312
David Howellsc69e8d92008-11-14 10:39:19 +11001313/*
1314 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001315 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	/* sig == 0 is a pure permission/existence probe: nothing is delivered. */
	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
1330
1331/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001332 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001334 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/* Overall success if at least one member accepted the signal. */
		success |= !err;
		retval = err;	/* keeps the last error seen */
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1350
/*
 * Send @sig to the thread group identified by @pid.
 *
 * The lookup and the send are not atomic: the target can be unhashed
 * between pid_task() and delivery, in which case we see -ESRCH with a
 * non-NULL @p and must retry.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		/* Done unless the task vanished underneath us. */
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1372
Eric W. Biederman6c478ae2017-04-17 22:10:04 -05001373static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001374{
1375 int error;
1376 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001377 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001378 rcu_read_unlock();
1379 return error;
1380}
1381
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001382static inline bool kill_as_cred_perm(const struct cred *cred,
1383 struct task_struct *target)
Serge Hallynd178bc32011-09-26 10:45:18 -05001384{
1385 const struct cred *pcred = __task_cred(target);
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001386
1387 return uid_eq(cred->euid, pcred->suid) ||
1388 uid_eq(cred->euid, pcred->uid) ||
1389 uid_eq(cred->uid, pcred->suid) ||
1390 uid_eq(cred->uid, pcred->uid);
Serge Hallynd178bc32011-09-26 10:45:18 -05001391}
1392
/*
 * like kill_pid_info(), but doesn't use uid/euid of "current": permission
 * is checked against the explicit @cred (plus the usual LSM hook) instead.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
		const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* Only user-originated siginfo is subject to the cred check. */
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		/* sig == 0 is a pure permission/existence probe. */
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 *
 *   pid > 0   signal the thread group with that (namespace) pid
 *   pid == 0  signal the caller's own process group
 *   pid == -1 signal everything except vpid <= 1 and our own group
 *   pid < -1  signal the process group -pid
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* Skip init (vpid 1), unhashed tasks, and ourselves. */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		/* -ESRCH only when there was nobody at all to signal. */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1477
1478/*
1479 * These are for backward compatibility with the rest of the kernel source.
1480 */
1481
Randy Dunlap5aba0852011-04-04 14:59:31 -07001482int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 /*
1485 * Make sure legacy kernel users don't send in bad values
1486 * (normal paths check this in check_kill_permission).
1487 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001488 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 return -EINVAL;
1490
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001491 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492}
1493
/* Map a priv flag onto the kernel-internal siginfo sentinel values. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1496
/* Send @sig to @p; @priv selects kernel-internal vs. no-info origin. */
int send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1502
/* Force-deliver @sig to @p via force_sig_info() with kernel-internal info. */
void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1507
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		/* Reset the handler so the forced SIGSEGV cannot recurse. */
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
1524
/*
 * Force-deliver a fault signal with a fully filled-in siginfo to @t.
 * The ___ARCH_SI_* macros expand to extra parameters only on
 * architectures whose siginfo carries a trap number (or, on ia64,
 * imm/flags/isr).
 */
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
1547
/*
 * Same siginfo construction as force_sig_fault(), but the signal is
 * delivered through send_sig_info() instead of force_sig_info().
 */
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
1570
/*
 * Force a SIGBUS with machine-check info (faulting address @addr, address
 * granularity @lsb) on @t.
 */
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	/* Only the two machine-check si_codes are valid here. */
	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}
1584
/*
 * Same as force_sig_mceerr(), but delivered through send_sig_info().
 */
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	/* Only the two machine-check si_codes are valid here. */
	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
Eric W. Biederman38246732018-01-18 18:54:31 -06001599
Eric W. Biederman38246732018-01-18 18:54:31 -06001600int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1601{
1602 struct siginfo info;
1603
1604 clear_siginfo(&info);
1605 info.si_signo = SIGSEGV;
1606 info.si_errno = 0;
1607 info.si_code = SEGV_BNDERR;
1608 info.si_addr = addr;
1609 info.si_lower = lower;
1610 info.si_upper = upper;
1611 return force_sig_info(info.si_signo, &info, current);
1612}
Eric W. Biederman38246732018-01-18 18:54:31 -06001613
#ifdef SEGV_PKUERR
/* Raise SIGSEGV/SEGV_PKUERR (protection-key fault at @addr) on current. */
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001628
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 * Raises SIGTRAP/TRAP_HWBKPT on current.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}
1643
/* Signal the process group @pid; tasklist_lock protects the group walk. */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
1655
/* Signal the thread group @pid; @priv selects kernel-internal origin. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1661
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662/*
1663 * These functions support sending signals using preallocated sigqueue
1664 * structures. This is needed "because realtime applications cannot
1665 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001666 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 * we allocate the sigqueue structure from the timer_create. If this
1668 * allocation fails we are able to report the failure to the application
1669 * with an EAGAIN error.
1670 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671struct sigqueue *sigqueue_alloc(void)
1672{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001673 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001675 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001677
1678 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679}
1680
/*
 * Release a preallocated sigqueue.  If the entry is still on a pending
 * list we only clear SIGQUEUE_PREALLOC and let the dequeue path free it;
 * otherwise we free it here.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1705
/*
 * Queue a preallocated sigqueue to the task looked up from @pid with the
 * given pid @type (POSIX timer delivery path).
 *
 * Returns 0 on success, 1 if the signal is ignored, -1 if the target
 * could not be found or its sighand could not be locked.
 */
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	/* Group-wide pending list unless this is a per-thread signal. */
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1753
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Called with tasklist_lock held (see the si_pid comment below).
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* Report the child's own CPU time plus its already-reaped children. */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	/* Decode exit_code: 0x80 = dumped core, low 7 bits = killing signal. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1851
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	/* Pick the recipient: the ptracer, or the group leader's real parent. */
	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	/* si_status depends on which state change is being reported. */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1922
/*
 * May the current (traced) task stop for its tracer, i.e. is there a live
 * tracer that will be able to wake it again?
 */
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
1946
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001948 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001949 * Called with the siglock held.
1950 */
Christian Braunerf99e9d82018-08-21 22:00:50 -07001951static bool sigkill_pending(struct task_struct *tsk)
Roland McGrath1a669c22008-02-06 01:37:37 -08001952{
Christian Braunerf99e9d82018-08-21 22:00:50 -07001953 return sigismember(&tsk->pending.signal, SIGKILL) ||
1954 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001955}
1956
1957/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 * This must be called with current->sighand->siglock held.
1959 *
1960 * This should be the path for all ptrace stops.
1961 * We always set current->last_siginfo while stopped here.
1962 * That makes it a way to test a stopped process for
1963 * being ptrace-stopped vs being job-control-stopped.
1964 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001965 * If we actually decide not to stop at all because the tracer
1966 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001968static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
Namhyung Kimb8401152010-10-27 15:34:07 -07001969 __releases(&current->sighand->siglock)
1970 __acquires(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971{
Tejun Heoceb6bd62011-03-23 10:37:01 +01001972 bool gstop_done = false;
1973
Roland McGrath1a669c22008-02-06 01:37:37 -08001974 if (arch_ptrace_stop_needed(exit_code, info)) {
1975 /*
1976 * The arch code has something special to do before a
1977 * ptrace stop. This is allowed to block, e.g. for faults
1978 * on user stack pages. We can't keep the siglock while
1979 * calling arch_ptrace_stop, so we must release it now.
1980 * To preserve proper semantics, we must do this before
1981 * any signal bookkeeping like checking group_stop_count.
1982 * Meanwhile, a SIGKILL could come in before we retake the
1983 * siglock. That must prevent us from sleeping in TASK_TRACED.
1984 * So after regaining the lock, we must check for SIGKILL.
1985 */
1986 spin_unlock_irq(&current->sighand->siglock);
1987 arch_ptrace_stop(exit_code, info);
1988 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001989 if (sigkill_pending(current))
1990 return;
Roland McGrath1a669c22008-02-06 01:37:37 -08001991 }
1992
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02001993 set_special_state(TASK_TRACED);
1994
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 /*
Tejun Heo81be24b2011-06-02 11:13:59 +02001996 * We're committing to trapping. TRACED should be visible before
1997 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1998 * Also, transition to TRACED and updates to ->jobctl should be
1999 * atomic with respect to siglock and should be done after the arch
2000 * hook as siglock is released and regrabbed across it.
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02002001 *
2002 * TRACER TRACEE
2003 *
2004 * ptrace_attach()
2005 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2006 * do_wait()
2007 * set_current_state() smp_wmb();
2008 * ptrace_do_wait()
2009 * wait_task_stopped()
2010 * task_stopped_code()
2011 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 */
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02002013 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
2015 current->last_siginfo = info;
2016 current->exit_code = exit_code;
2017
Tejun Heod79fdd62011-03-23 10:37:00 +01002018 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 * If @why is CLD_STOPPED, we're trapping to participate in a group
2020 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
Tejun Heo73ddff22011-06-14 11:20:14 +02002021 * across siglock relocks since INTERRUPT was scheduled, PENDING
2022 * could be clear now. We act as if SIGCONT is received after
2023 * TASK_TRACED is entered - ignore it.
Tejun Heod79fdd62011-03-23 10:37:00 +01002024 */
Tejun Heoa8f072c2011-06-02 11:13:59 +02002025 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 gstop_done = task_participate_group_stop(current);
Tejun Heod79fdd62011-03-23 10:37:00 +01002027
Tejun Heofb1d9102011-06-14 11:20:17 +02002028 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
Tejun Heo73ddff22011-06-14 11:20:14 +02002029 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
Tejun Heofb1d9102011-06-14 11:20:17 +02002030 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2031 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
Tejun Heo73ddff22011-06-14 11:20:14 +02002032
Tejun Heo81be24b2011-06-02 11:13:59 +02002033 /* entering a trap, clear TRAPPING */
Tejun Heoa8f072c2011-06-02 11:13:59 +02002034 task_clear_jobctl_trapping(current);
Tejun Heod79fdd62011-03-23 10:37:00 +01002035
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 spin_unlock_irq(&current->sighand->siglock);
2037 read_lock(&tasklist_lock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07002038 if (may_ptrace_stop()) {
Tejun Heoceb6bd62011-03-23 10:37:01 +01002039 /*
2040 * Notify parents of the stop.
2041 *
2042 * While ptraced, there are two parents - the ptracer and
2043 * the real_parent of the group_leader. The ptracer should
2044 * know about every stop while the real parent is only
2045 * interested in the completion of group stop. The states
2046 * for the two don't interact with each other. Notify
2047 * separately unless they're gonna be duplicates.
2048 */
2049 do_notify_parent_cldstop(current, true, why);
Oleg Nesterovbb3696d2011-06-24 17:34:23 +02002050 if (gstop_done && ptrace_reparented(current))
Tejun Heoceb6bd62011-03-23 10:37:01 +01002051 do_notify_parent_cldstop(current, false, why);
2052
Miklos Szeredi53da1d92009-03-23 16:07:24 +01002053 /*
2054 * Don't want to allow preemption here, because
2055 * sys_ptrace() needs this task to be inactive.
2056 *
2057 * XXX: implement read_unlock_no_resched().
2058 */
2059 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 read_unlock(&tasklist_lock);
Miklos Szeredi53da1d92009-03-23 16:07:24 +01002061 preempt_enable_no_resched();
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002062 freezable_schedule();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 } else {
2064 /*
2065 * By the time we got the lock, our tracer went away.
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002066 * Don't drop the lock yet, another tracer may come.
Tejun Heoceb6bd62011-03-23 10:37:01 +01002067 *
2068 * If @gstop_done, the ptracer went away between group stop
2069 * completion and here. During detach, it would have set
Tejun Heoa8f072c2011-06-02 11:13:59 +02002070 * JOBCTL_STOP_PENDING on us and we'll re-enter
2071 * TASK_STOPPED in do_signal_stop() on return, so notifying
2072 * the real parent of the group stop completion is enough.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 */
Tejun Heoceb6bd62011-03-23 10:37:01 +01002074 if (gstop_done)
2075 do_notify_parent_cldstop(current, false, why);
2076
Oleg Nesterov9899d112013-01-21 20:48:00 +01002077 /* tasklist protects us from ptrace_freeze_traced() */
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002078 __set_current_state(TASK_RUNNING);
Oleg Nesterov20686a32008-02-08 04:19:03 -08002079 if (clear_code)
2080 current->exit_code = 0;
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002081 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 }
2083
2084 /*
2085 * We are back. Now reacquire the siglock before touching
2086 * last_siginfo, so that we are sure to have synchronized with
2087 * any signal-sending on another CPU that wants to examine it.
2088 */
2089 spin_lock_irq(&current->sighand->siglock);
2090 current->last_siginfo = NULL;
2091
Tejun Heo544b2c92011-06-14 11:20:18 +02002092 /* LISTENING can be set only during STOP traps, clear it */
2093 current->jobctl &= ~JOBCTL_LISTENING;
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 /*
2096 * Queued signals ignored us while we were stopped for tracing.
2097 * So check for any that we should take before resuming user mode.
Roland McGrathb74d0de2007-06-06 03:59:00 -07002098 * This sets TIF_SIGPENDING, but never clears it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 */
Roland McGrathb74d0de2007-06-06 03:59:00 -07002100 recalc_sigpending_tsk(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101}
2102
Tejun Heo3544d722011-06-14 11:20:15 +02002103static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104{
2105 siginfo_t info;
2106
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06002107 clear_siginfo(&info);
Tejun Heo3544d722011-06-14 11:20:15 +02002108 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002110 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08002111 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02002114 ptrace_stop(exit_code, why, 1, &info);
2115}
2116
/*
 * ptrace_notify - report a ptrace event to the tracer via a SIGTRAP stop.
 * @exit_code: event code; the low seven bits must be SIGTRAP and the
 *             value must fit in 16 bits, enforced by the BUG_ON() below.
 *             It ends up in ->exit_code for the tracer's wait(2).
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	/* Flush queued task_work before sleeping in the trap. */
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2127
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		/* restart the count; each thread tagged below adds one */
		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002251
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/* no group stop in flight: report a plain INTERRUPT trap */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2285
/*
 * ptrace_signal - let the tracer see (and possibly replace) a dequeued signal
 * @signr: signal just dequeued
 * @info:  its siginfo; may be rewritten if the tracer substitutes a signal
 *
 * Called with current->sighand->siglock held (from get_signal()).
 * Returns the signal number to deliver, or 0 if the debugger cancelled
 * it or the replacement signal is currently blocked (then it is requeued).
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}
2333
/**
 * get_signal - dequeue and pick up the next signal for the current task
 * @ksig: output; on %true return holds the signal number (@ksig->sig),
 *        its siginfo and the k_sigaction to use for delivery
 *
 * Runs pending task_work, honours the freezer, performs job-control
 * duties (group stop participation, jobctl/ptrace traps, notifying the
 * parent of CLD_STOPPED/CLD_CONTINUED transitions), then dequeues
 * signals until one must be delivered to a user handler (%true) or
 * none remain (%false).  Signals whose default action is fatal never
 * return here: the task ends in do_coredump()/do_group_exit().
 */
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* give the tracer a chance to cancel or change the signal */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2519
Matt Fleming5e6292c2012-01-10 15:11:17 -08002520/**
Al Viroefee9842012-04-28 02:04:15 -04002521 * signal_delivered -
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002522 * @ksig: kernel signal struct
Al Viroefee9842012-04-28 02:04:15 -04002523 * @stepping: nonzero if debugger single-step or block-step in use
Matt Fleming5e6292c2012-01-10 15:11:17 -08002524 *
Masanari Iidae2278672014-02-18 22:54:36 +09002525 * This function should be called when a signal has successfully been
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002526 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
Al Viroefee9842012-04-28 02:04:15 -04002527 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002528 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
Matt Fleming5e6292c2012-01-10 15:11:17 -08002529 */
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002530static void signal_delivered(struct ksignal *ksig, int stepping)
Matt Fleming5e6292c2012-01-10 15:11:17 -08002531{
2532 sigset_t blocked;
2533
Al Viroa610d6e2012-05-21 23:42:15 -04002534 /* A signal was successfully delivered, and the
2535 saved sigmask was stored on the signal frame,
2536 and will be restored by sigreturn. So we can
2537 simply clear the restore sigmask flag. */
2538 clear_restore_sigmask();
2539
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002540 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2541 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2542 sigaddset(&blocked, ksig->sig);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002543 set_current_blocked(&blocked);
Richard Weinbergerdf5601f2013-10-07 15:37:19 +02002544 tracehook_signal_handler(stepping);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002545}
2546
Al Viro2ce5da12012-11-07 15:11:25 -05002547void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2548{
2549 if (failed)
2550 force_sigsegv(ksig->sig, current);
2551 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002552 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002553}
2554
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002555/*
2556 * It could be that complete_signal() picked us to notify about the
Oleg Nesterovfec99932011-04-27 19:50:21 +02002557 * group-wide signal. Other threads should be notified now to take
2558 * the shared signals in @which since we will not.
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002559 */
Oleg Nesterovf646e222011-04-27 19:18:39 +02002560static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002561{
Oleg Nesterovf646e222011-04-27 19:18:39 +02002562 sigset_t retarget;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002563 struct task_struct *t;
2564
Oleg Nesterovf646e222011-04-27 19:18:39 +02002565 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2566 if (sigisemptyset(&retarget))
2567 return;
2568
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002569 t = tsk;
2570 while_each_thread(tsk, t) {
Oleg Nesterovfec99932011-04-27 19:50:21 +02002571 if (t->flags & PF_EXITING)
2572 continue;
2573
2574 if (!has_pending_signals(&retarget, &t->blocked))
2575 continue;
2576 /* Remove the signals this thread can handle. */
2577 sigandsets(&retarget, &retarget, &t->blocked);
2578
2579 if (!signal_pending(t))
2580 signal_wake_up(t, 0);
2581
2582 if (sigisemptyset(&retarget))
2583 break;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002584 }
2585}
2586
/*
 * exit_signals - signal bookkeeping as @tsk starts exiting.
 *
 * Sets PF_EXITING (under siglock in the multithreaded case, so the
 * transition is atomic with respect to group-wide signal delivery),
 * re-targets shared pending signals this task will no longer take,
 * and completes its part of any in-progress group stop, notifying
 * the parent if that finished the stop.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	/* single-threaded or whole-group exit: no retargeting needed */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* offer the signals we had unblocked to the other threads */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2636
/* Make the core signal helpers available to the rest of the kernel. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
2645/*
2646 * System call entry points.
2647 */
2648
Randy Dunlap41c57892011-04-04 15:00:26 -07002649/**
2650 * sys_restart_syscall - restart a system call
2651 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002652SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002654 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 return restart->fn(restart);
2656}
2657
/* Restart callback for syscalls that must not be restarted: fail with -EINTR. */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2662
Oleg Nesterovb1828012011-04-27 21:56:14 +02002663static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2664{
2665 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2666 sigset_t newblocked;
2667 /* A set of now blocked but previously unblocked signals. */
Oleg Nesterov702a5072011-04-27 22:01:27 +02002668 sigandnsets(&newblocked, newset, &current->blocked);
Oleg Nesterovb1828012011-04-27 21:56:14 +02002669 retarget_shared_pending(tsk, &newblocked);
2670 }
2671 tsk->blocked = *newset;
2672 recalc_sigpending();
2673}
2674
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002675/**
2676 * set_current_blocked - change current->blocked mask
2677 * @newset: new mask
2678 *
2679 * It is wrong to change ->blocked directly, this helper should be used
2680 * to ensure the process can't miss a shared signal we are going to block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 */
Al Viro77097ae2012-04-27 13:58:59 -04002682void set_current_blocked(sigset_t *newset)
2683{
Al Viro77097ae2012-04-27 13:58:59 -04002684 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01002685 __set_current_blocked(newset);
Al Viro77097ae2012-04-27 13:58:59 -04002686}
2687
/*
 * Like set_current_blocked(), but trusts the caller not to include
 * SIGKILL/SIGSTOP in @newset.  Takes siglock so blocked-signal
 * retargeting in __set_task_blocked() is done atomically.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
2704/*
2705 * This is also useful for kernel threads that want to temporarily
2706 * (or permanently) block certain signals.
2707 *
2708 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2709 * interface happily blocks "unblockable" signals like SIGKILL
2710 * and friends.
2711 */
2712int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2713{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002714 struct task_struct *tsk = current;
2715 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002717 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002718 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002719 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002720
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 switch (how) {
2722 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002723 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 break;
2725 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002726 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 break;
2728 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002729 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 break;
2731 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002732 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002734
Al Viro77097ae2012-04-27 13:58:59 -04002735 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002736 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737}
2738
Randy Dunlap41c57892011-04-04 15:00:26 -07002739/**
2740 * sys_rt_sigprocmask - change the list of currently blocked signals
2741 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07002742 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002743 * @oset: previous value of signal mask if non-null
2744 * @sigsetsize: size of sigset_t type
2745 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002746SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002747 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002750 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
2752 /* XXX: Don't preclude handling different sized sigset_t's. */
2753 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002754 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002756 old_set = current->blocked;
2757
2758 if (nset) {
2759 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2760 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2762
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002763 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002765 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002767
2768 if (oset) {
2769 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2770 return -EFAULT;
2771 }
2772
2773 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774}
2775
Al Viro322a56c2012-12-25 13:32:58 -05002776#ifdef CONFIG_COMPAT
Al Viro322a56c2012-12-25 13:32:58 -05002777COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2778 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779{
Al Viro322a56c2012-12-25 13:32:58 -05002780 sigset_t old_set = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
Al Viro322a56c2012-12-25 13:32:58 -05002782 /* XXX: Don't preclude handling different sized sigset_t's. */
2783 if (sigsetsize != sizeof(sigset_t))
2784 return -EINVAL;
2785
2786 if (nset) {
Al Viro322a56c2012-12-25 13:32:58 -05002787 sigset_t new_set;
2788 int error;
Al Viro3968cf62017-09-03 21:45:17 -04002789 if (get_compat_sigset(&new_set, nset))
Al Viro322a56c2012-12-25 13:32:58 -05002790 return -EFAULT;
Al Viro322a56c2012-12-25 13:32:58 -05002791 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2792
2793 error = sigprocmask(how, &new_set, NULL);
2794 if (error)
2795 return error;
2796 }
Dmitry V. Levinf4543222017-08-22 02:16:11 +03002797 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
Al Viro322a56c2012-12-25 13:32:58 -05002798}
2799#endif
Al Viro322a56c2012-12-25 13:32:58 -05002800
Christian Braunerb1d294c2018-08-21 22:00:02 -07002801static void do_sigpending(sigset_t *set)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 spin_lock_irq(&current->sighand->siglock);
Al Virofe9c1db2012-12-25 14:31:38 -05002804 sigorsets(set, &current->pending.signal,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 &current->signal->shared_pending.signal);
2806 spin_unlock_irq(&current->sighand->siglock);
2807
2808 /* Outside the lock because only this thread touches it. */
Al Virofe9c1db2012-12-25 14:31:38 -05002809 sigandsets(set, &current->blocked, set);
Randy Dunlap5aba0852011-04-04 14:59:31 -07002810}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
Randy Dunlap41c57892011-04-04 15:00:26 -07002812/**
2813 * sys_rt_sigpending - examine a pending signal that has been raised
2814 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002815 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002816 * @sigsetsize: size of sigset_t type or larger
2817 */
Al Virofe9c1db2012-12-25 14:31:38 -05002818SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819{
Al Virofe9c1db2012-12-25 14:31:38 -05002820 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002821
2822 if (sigsetsize > sizeof(*uset))
2823 return -EINVAL;
2824
Christian Braunerb1d294c2018-08-21 22:00:02 -07002825 do_sigpending(&set);
2826
2827 if (copy_to_user(uset, &set, sigsetsize))
2828 return -EFAULT;
2829
2830 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831}
2832
Al Virofe9c1db2012-12-25 14:31:38 -05002833#ifdef CONFIG_COMPAT
Al Virofe9c1db2012-12-25 14:31:38 -05002834COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2835 compat_size_t, sigsetsize)
2836{
Al Virofe9c1db2012-12-25 14:31:38 -05002837 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002838
2839 if (sigsetsize > sizeof(*uset))
2840 return -EINVAL;
2841
Christian Braunerb1d294c2018-08-21 22:00:02 -07002842 do_sigpending(&set);
2843
2844 return put_compat_sigset(uset, &set, sigsetsize);
Al Virofe9c1db2012-12-25 14:31:38 -05002845}
2846#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002847
Eric W. Biedermancc731522017-07-16 22:36:59 -05002848enum siginfo_layout siginfo_layout(int sig, int si_code)
2849{
2850 enum siginfo_layout layout = SIL_KILL;
2851 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2852 static const struct {
2853 unsigned char limit, layout;
2854 } filter[] = {
2855 [SIGILL] = { NSIGILL, SIL_FAULT },
2856 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2857 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2858 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2859 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
Andrew Claytonc3aff082017-11-01 15:49:59 +00002860#if defined(SIGEMT) && defined(NSIGEMT)
Eric W. Biedermancc731522017-07-16 22:36:59 -05002861 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2862#endif
2863 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2864 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
Eric W. Biedermancc731522017-07-16 22:36:59 -05002865 [SIGSYS] = { NSIGSYS, SIL_SYS },
Eric W. Biedermancc731522017-07-16 22:36:59 -05002866 };
Eric W. Biederman31931c92018-04-24 20:59:47 -05002867 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
Eric W. Biedermancc731522017-07-16 22:36:59 -05002868 layout = filter[sig].layout;
Eric W. Biederman31931c92018-04-24 20:59:47 -05002869 /* Handle the exceptions */
2870 if ((sig == SIGBUS) &&
2871 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2872 layout = SIL_FAULT_MCEERR;
2873 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2874 layout = SIL_FAULT_BNDERR;
2875#ifdef SEGV_PKUERR
2876 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2877 layout = SIL_FAULT_PKUERR;
2878#endif
2879 }
Eric W. Biedermancc731522017-07-16 22:36:59 -05002880 else if (si_code <= NSIGPOLL)
2881 layout = SIL_POLL;
2882 } else {
2883 if (si_code == SI_TIMER)
2884 layout = SIL_TIMER;
2885 else if (si_code == SI_SIGIO)
2886 layout = SIL_POLL;
2887 else if (si_code < 0)
2888 layout = SIL_RT;
Eric W. Biedermancc731522017-07-16 22:36:59 -05002889 }
2890 return layout;
2891}
2892
Al Viroce395962013-10-13 17:23:53 -04002893int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894{
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002895 if (copy_to_user(to, from , sizeof(struct siginfo)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 return -EFAULT;
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002897 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898}
2899
Eric W. Biederman212a36a2017-07-31 17:15:31 -05002900#ifdef CONFIG_COMPAT
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06002901int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2902 const struct siginfo *from)
2903#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2904{
2905 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2906}
2907int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2908 const struct siginfo *from, bool x32_ABI)
2909#endif
2910{
2911 struct compat_siginfo new;
2912 memset(&new, 0, sizeof(new));
2913
2914 new.si_signo = from->si_signo;
2915 new.si_errno = from->si_errno;
2916 new.si_code = from->si_code;
2917 switch(siginfo_layout(from->si_signo, from->si_code)) {
2918 case SIL_KILL:
2919 new.si_pid = from->si_pid;
2920 new.si_uid = from->si_uid;
2921 break;
2922 case SIL_TIMER:
2923 new.si_tid = from->si_tid;
2924 new.si_overrun = from->si_overrun;
2925 new.si_int = from->si_int;
2926 break;
2927 case SIL_POLL:
2928 new.si_band = from->si_band;
2929 new.si_fd = from->si_fd;
2930 break;
2931 case SIL_FAULT:
2932 new.si_addr = ptr_to_compat(from->si_addr);
2933#ifdef __ARCH_SI_TRAPNO
2934 new.si_trapno = from->si_trapno;
2935#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05002936 break;
2937 case SIL_FAULT_MCEERR:
2938 new.si_addr = ptr_to_compat(from->si_addr);
2939#ifdef __ARCH_SI_TRAPNO
2940 new.si_trapno = from->si_trapno;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06002941#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05002942 new.si_addr_lsb = from->si_addr_lsb;
2943 break;
2944 case SIL_FAULT_BNDERR:
2945 new.si_addr = ptr_to_compat(from->si_addr);
2946#ifdef __ARCH_SI_TRAPNO
2947 new.si_trapno = from->si_trapno;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06002948#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05002949 new.si_lower = ptr_to_compat(from->si_lower);
2950 new.si_upper = ptr_to_compat(from->si_upper);
2951 break;
2952 case SIL_FAULT_PKUERR:
2953 new.si_addr = ptr_to_compat(from->si_addr);
2954#ifdef __ARCH_SI_TRAPNO
2955 new.si_trapno = from->si_trapno;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06002956#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05002957 new.si_pkey = from->si_pkey;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06002958 break;
2959 case SIL_CHLD:
2960 new.si_pid = from->si_pid;
2961 new.si_uid = from->si_uid;
2962 new.si_status = from->si_status;
2963#ifdef CONFIG_X86_X32_ABI
2964 if (x32_ABI) {
2965 new._sifields._sigchld_x32._utime = from->si_utime;
2966 new._sifields._sigchld_x32._stime = from->si_stime;
2967 } else
2968#endif
2969 {
2970 new.si_utime = from->si_utime;
2971 new.si_stime = from->si_stime;
2972 }
2973 break;
2974 case SIL_RT:
2975 new.si_pid = from->si_pid;
2976 new.si_uid = from->si_uid;
2977 new.si_int = from->si_int;
2978 break;
2979 case SIL_SYS:
2980 new.si_call_addr = ptr_to_compat(from->si_call_addr);
2981 new.si_syscall = from->si_syscall;
2982 new.si_arch = from->si_arch;
2983 break;
2984 }
2985
2986 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
2987 return -EFAULT;
2988
2989 return 0;
2990}
2991
Eric W. Biederman212a36a2017-07-31 17:15:31 -05002992int copy_siginfo_from_user32(struct siginfo *to,
2993 const struct compat_siginfo __user *ufrom)
2994{
2995 struct compat_siginfo from;
2996
2997 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
2998 return -EFAULT;
2999
3000 clear_siginfo(to);
3001 to->si_signo = from.si_signo;
3002 to->si_errno = from.si_errno;
3003 to->si_code = from.si_code;
3004 switch(siginfo_layout(from.si_signo, from.si_code)) {
3005 case SIL_KILL:
3006 to->si_pid = from.si_pid;
3007 to->si_uid = from.si_uid;
3008 break;
3009 case SIL_TIMER:
3010 to->si_tid = from.si_tid;
3011 to->si_overrun = from.si_overrun;
3012 to->si_int = from.si_int;
3013 break;
3014 case SIL_POLL:
3015 to->si_band = from.si_band;
3016 to->si_fd = from.si_fd;
3017 break;
3018 case SIL_FAULT:
3019 to->si_addr = compat_ptr(from.si_addr);
3020#ifdef __ARCH_SI_TRAPNO
3021 to->si_trapno = from.si_trapno;
3022#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05003023 break;
3024 case SIL_FAULT_MCEERR:
3025 to->si_addr = compat_ptr(from.si_addr);
3026#ifdef __ARCH_SI_TRAPNO
3027 to->si_trapno = from.si_trapno;
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003028#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05003029 to->si_addr_lsb = from.si_addr_lsb;
3030 break;
3031 case SIL_FAULT_BNDERR:
3032 to->si_addr = compat_ptr(from.si_addr);
3033#ifdef __ARCH_SI_TRAPNO
3034 to->si_trapno = from.si_trapno;
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003035#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05003036 to->si_lower = compat_ptr(from.si_lower);
3037 to->si_upper = compat_ptr(from.si_upper);
3038 break;
3039 case SIL_FAULT_PKUERR:
3040 to->si_addr = compat_ptr(from.si_addr);
3041#ifdef __ARCH_SI_TRAPNO
3042 to->si_trapno = from.si_trapno;
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003043#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05003044 to->si_pkey = from.si_pkey;
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003045 break;
3046 case SIL_CHLD:
3047 to->si_pid = from.si_pid;
3048 to->si_uid = from.si_uid;
3049 to->si_status = from.si_status;
3050#ifdef CONFIG_X86_X32_ABI
3051 if (in_x32_syscall()) {
3052 to->si_utime = from._sifields._sigchld_x32._utime;
3053 to->si_stime = from._sifields._sigchld_x32._stime;
3054 } else
3055#endif
3056 {
3057 to->si_utime = from.si_utime;
3058 to->si_stime = from.si_stime;
3059 }
3060 break;
3061 case SIL_RT:
3062 to->si_pid = from.si_pid;
3063 to->si_uid = from.si_uid;
3064 to->si_int = from.si_int;
3065 break;
3066 case SIL_SYS:
3067 to->si_call_addr = compat_ptr(from.si_call_addr);
3068 to->si_syscall = from.si_syscall;
3069 to->si_arch = from.si_arch;
3070 break;
3071 }
3072 return 0;
3073}
3074#endif /* CONFIG_COMPAT */
3075
Randy Dunlap41c57892011-04-04 15:00:26 -07003076/**
Oleg Nesterov943df142011-04-27 21:44:14 +02003077 * do_sigtimedwait - wait for queued signals specified in @which
3078 * @which: queued signals to wait for
3079 * @info: if non-null, the signal's siginfo is returned here
3080 * @ts: upper bound on process time suspension
3081 */
Al Viro1b3c8722017-05-31 04:46:17 -04003082static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003083 const struct timespec *ts)
Oleg Nesterov943df142011-04-27 21:44:14 +02003084{
Thomas Gleixner2456e852016-12-25 11:38:40 +01003085 ktime_t *to = NULL, timeout = KTIME_MAX;
Oleg Nesterov943df142011-04-27 21:44:14 +02003086 struct task_struct *tsk = current;
Oleg Nesterov943df142011-04-27 21:44:14 +02003087 sigset_t mask = *which;
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003088 int sig, ret = 0;
Oleg Nesterov943df142011-04-27 21:44:14 +02003089
3090 if (ts) {
3091 if (!timespec_valid(ts))
3092 return -EINVAL;
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003093 timeout = timespec_to_ktime(*ts);
3094 to = &timeout;
Oleg Nesterov943df142011-04-27 21:44:14 +02003095 }
3096
3097 /*
3098 * Invert the set of allowed signals to get those we want to block.
3099 */
3100 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3101 signotset(&mask);
3102
3103 spin_lock_irq(&tsk->sighand->siglock);
3104 sig = dequeue_signal(tsk, &mask, info);
Thomas Gleixner2456e852016-12-25 11:38:40 +01003105 if (!sig && timeout) {
Oleg Nesterov943df142011-04-27 21:44:14 +02003106 /*
3107 * None ready, temporarily unblock those we're interested
3108 * while we are sleeping in so that we'll be awakened when
Oleg Nesterovb1828012011-04-27 21:56:14 +02003109 * they arrive. Unblocking is always fine, we can avoid
3110 * set_current_blocked().
Oleg Nesterov943df142011-04-27 21:44:14 +02003111 */
3112 tsk->real_blocked = tsk->blocked;
3113 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3114 recalc_sigpending();
3115 spin_unlock_irq(&tsk->sighand->siglock);
3116
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003117 __set_current_state(TASK_INTERRUPTIBLE);
3118 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3119 HRTIMER_MODE_REL);
Oleg Nesterov943df142011-04-27 21:44:14 +02003120 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovb1828012011-04-27 21:56:14 +02003121 __set_task_blocked(tsk, &tsk->real_blocked);
Oleg Nesterov61140412014-06-06 14:36:46 -07003122 sigemptyset(&tsk->real_blocked);
Oleg Nesterovb1828012011-04-27 21:56:14 +02003123 sig = dequeue_signal(tsk, &mask, info);
Oleg Nesterov943df142011-04-27 21:44:14 +02003124 }
3125 spin_unlock_irq(&tsk->sighand->siglock);
3126
3127 if (sig)
3128 return sig;
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003129 return ret ? -EINTR : -EAGAIN;
Oleg Nesterov943df142011-04-27 21:44:14 +02003130}
3131
3132/**
Randy Dunlap41c57892011-04-04 15:00:26 -07003133 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3134 * in @uthese
3135 * @uthese: queued signals to wait for
3136 * @uinfo: if non-null, the signal's siginfo is returned here
3137 * @uts: upper bound on process time suspension
3138 * @sigsetsize: size of sigset_t type
3139 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003140SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3141 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3142 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 sigset_t these;
3145 struct timespec ts;
3146 siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02003147 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148
3149 /* XXX: Don't preclude handling different sized sigset_t's. */
3150 if (sigsetsize != sizeof(sigset_t))
3151 return -EINVAL;
3152
3153 if (copy_from_user(&these, uthese, sizeof(these)))
3154 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07003155
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 if (uts) {
3157 if (copy_from_user(&ts, uts, sizeof(ts)))
3158 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 }
3160
Oleg Nesterov943df142011-04-27 21:44:14 +02003161 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162
Oleg Nesterov943df142011-04-27 21:44:14 +02003163 if (ret > 0 && uinfo) {
3164 if (copy_siginfo_to_user(uinfo, &info))
3165 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 }
3167
3168 return ret;
3169}
3170
Al Viro1b3c8722017-05-31 04:46:17 -04003171#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigtimedwait(): converts the 32-bit sigset,
 * timespec and siginfo layouts, then shares do_sigtimedwait() with the
 * native syscall.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	/* Positive return is the dequeued signal; hand its siginfo back. */
	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
3201#endif
3202
Randy Dunlap41c57892011-04-04 15:00:26 -07003203/**
3204 * sys_kill - send a signal to a process
3205 * @pid: the PID of the process
3206 * @sig: signal to be sent
3207 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003208SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209{
3210 struct siginfo info;
3211
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06003212 clear_siginfo(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 info.si_signo = sig;
3214 info.si_errno = 0;
3215 info.si_code = SI_USER;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003216 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08003217 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 return kill_something_info(sig, &info, pid);
3220}
3221
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003222static int
3223do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003224{
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003225 struct task_struct *p;
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003226 int error = -ESRCH;
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003227
Oleg Nesterov3547ff32008-04-30 00:52:51 -07003228 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07003229 p = find_task_by_vpid(pid);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003230 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003231 error = check_kill_permission(sig, info, p);
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003232 /*
3233 * The null signal is a permissions and process existence
3234 * probe. No signal is actually delivered.
3235 */
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07003236 if (!error && sig) {
Eric W. Biederman40b3b022018-07-21 10:45:15 -05003237 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07003238 /*
3239 * If lock_task_sighand() failed we pretend the task
3240 * dies after receiving the signal. The window is tiny,
3241 * and the signal is private anyway.
3242 */
3243 if (unlikely(error == -ESRCH))
3244 error = 0;
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003245 }
3246 }
Oleg Nesterov3547ff32008-04-30 00:52:51 -07003247 rcu_read_unlock();
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003248
3249 return error;
3250}
3251
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003252static int do_tkill(pid_t tgid, pid_t pid, int sig)
3253{
Eric W. Biederman5f749722018-01-22 14:58:57 -06003254 struct siginfo info;
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003255
Eric W. Biederman5f749722018-01-22 14:58:57 -06003256 clear_siginfo(&info);
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003257 info.si_signo = sig;
3258 info.si_errno = 0;
3259 info.si_code = SI_TKILL;
3260 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08003261 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003262
3263 return do_send_specific(tgid, pid, sig, &info);
3264}
3265
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266/**
3267 * sys_tgkill - send signal to one specific thread
3268 * @tgid: the thread group ID of the thread
3269 * @pid: the PID of the thread
3270 * @sig: signal to be sent
3271 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003272 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 * exists but it's not belonging to the target process anymore. This
3274 * method solves the problem of threads exiting and PIDs getting reused.
3275 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003276SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 /* This is only valid for single tasks */
3279 if (pid <= 0 || tgid <= 0)
3280 return -EINVAL;
3281
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003282 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283}
3284
Randy Dunlap41c57892011-04-04 15:00:26 -07003285/**
3286 * sys_tkill - send signal to one specific task
3287 * @pid: the PID of the task
3288 * @sig: signal to be sent
3289 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3291 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003292SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294 /* This is only valid for single tasks */
3295 if (pid <= 0)
3296 return -EINVAL;
3297
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003298 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299}
3300
Al Viro75907d42012-12-25 15:19:12 -05003301static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3302{
3303 /* Not even root can pretend to send signals from the kernel.
3304 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3305 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003306 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003307 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003308 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003309
Al Viro75907d42012-12-25 15:19:12 -05003310 info->si_signo = sig;
3311
3312 /* POSIX.1b doesn't mention process groups. */
3313 return kill_proc_info(sig, info, pid);
3314}
3315
Randy Dunlap41c57892011-04-04 15:00:26 -07003316/**
3317 * sys_rt_sigqueueinfo - send signal information to a signal
3318 * @pid: the PID of the thread
3319 * @sig: signal to be sent
3320 * @uinfo: signal info to be sent
3321 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003322SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3323 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324{
3325 siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3327 return -EFAULT;
Al Viro75907d42012-12-25 15:19:12 -05003328 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329}
3330
Al Viro75907d42012-12-25 15:19:12 -05003331#ifdef CONFIG_COMPAT
Al Viro75907d42012-12-25 15:19:12 -05003332COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3333 compat_pid_t, pid,
3334 int, sig,
3335 struct compat_siginfo __user *, uinfo)
3336{
Eric W. Biedermaneb5346c2017-07-31 17:18:40 -05003337 siginfo_t info;
Al Viro75907d42012-12-25 15:19:12 -05003338 int ret = copy_siginfo_from_user32(&info, uinfo);
3339 if (unlikely(ret))
3340 return ret;
3341 return do_rt_sigqueueinfo(pid, sig, &info);
3342}
3343#endif
Al Viro75907d42012-12-25 15:19:12 -05003344
Al Viro9aae8fc2012-12-24 23:12:04 -05003345static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003346{
3347 /* This is only valid for single tasks */
3348 if (pid <= 0 || tgid <= 0)
3349 return -EINVAL;
3350
3351 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07003352 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3353 */
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003354 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3355 (task_pid_vnr(current) != pid))
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003356 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003357
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003358 info->si_signo = sig;
3359
3360 return do_send_specific(tgid, pid, sig, info);
3361}
3362
3363SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3364 siginfo_t __user *, uinfo)
3365{
3366 siginfo_t info;
3367
3368 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3369 return -EFAULT;
3370
3371 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3372}
3373
Al Viro9aae8fc2012-12-24 23:12:04 -05003374#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_tgsigqueueinfo(): converts the 32-bit
 * siginfo layout, then shares do_rt_tgsigqueueinfo() with the native
 * syscall.
 */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3387#endif
3388
Oleg Nesterov03417292014-06-06 14:36:53 -07003389/*
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003390 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
Oleg Nesterov03417292014-06-06 14:36:53 -07003391 */
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003392void kernel_sigaction(int sig, __sighandler_t action)
Oleg Nesterov03417292014-06-06 14:36:53 -07003393{
Oleg Nesterovec5955b2014-06-06 14:36:57 -07003394 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003395 current->sighand->action[sig - 1].sa.sa_handler = action;
3396 if (action == SIG_IGN) {
3397 sigset_t mask;
3398
3399 sigemptyset(&mask);
3400 sigaddset(&mask, sig);
3401
3402 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3403 flush_sigqueue_mask(&mask, &current->pending);
3404 recalc_sigpending();
3405 }
Oleg Nesterov03417292014-06-06 14:36:53 -07003406 spin_unlock_irq(&current->sighand->siglock);
3407}
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003408EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003409
/*
 * Weak no-op hook: architectures may override this to tweak the new/old
 * sigactions for compat-ABI tasks before do_sigaction() commits them.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3414
/*
 * Install a new action for @sig on the current process (and/or report
 * the old one via @oact).
 *
 * Returns 0 on success, or -EINVAL for an invalid signal number or an
 * attempt to change a kernel-only signal (SIGKILL/SIGSTOP).
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* Kernel-only signals may be queried but never changed. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	/* siglock serializes against delivery and other sigaction() calls. */
	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Arch hook: may adjust act/oact for compat-ABI tasks (no-op by default). */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be blocked by a handler mask. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush the now-ignored signal from the shared queue
			 * and from every thread's private queue. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3459
/*
 * Core of sigaltstack(): optionally report the current alternate signal
 * stack into *@oss and optionally install a new one from *@ss.  @sp is
 * the caller's current user stack pointer, used to decide whether we are
 * presently executing on the alternate stack.
 *
 * Returns 0 on success, -EPERM if called while on the alternate stack,
 * -EINVAL for an unknown ss_flags mode, -ENOMEM if the new stack is
 * smaller than MINSIGSTKSZ.
 */
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
	struct task_struct *t = current;

	if (oss) {
		/* Zero the whole struct so padding can't leak stack data. */
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		/* Report on-stack/disabled state plus mode bits (e.g. SS_AUTODISARM). */
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		/* Changing the altstack while running on it is forbidden. */
		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		/* Strip the mode bits; only SS_DISABLE, SS_ONSTACK or 0 remain valid. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < MINSIGSTKSZ))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
Al Virobcfe8ad2017-05-27 00:29:34 -04003501
Al Viro6bf9adf2012-12-14 14:09:47 -05003502SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3503{
Al Virobcfe8ad2017-05-27 00:29:34 -04003504 stack_t new, old;
3505 int err;
3506 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3507 return -EFAULT;
3508 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3509 current_user_stack_pointer());
3510 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3511 err = -EFAULT;
3512 return err;
Al Viro6bf9adf2012-12-14 14:09:47 -05003513}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514
Al Viro5c495742012-11-18 15:29:16 -05003515int restore_altstack(const stack_t __user *uss)
3516{
Al Virobcfe8ad2017-05-27 00:29:34 -04003517 stack_t new;
3518 if (copy_from_user(&new, uss, sizeof(stack_t)))
3519 return -EFAULT;
3520 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
Al Viro5c495742012-11-18 15:29:16 -05003521 /* squash all but EFAULT for now */
Al Virobcfe8ad2017-05-27 00:29:34 -04003522 return 0;
Al Viro5c495742012-11-18 15:29:16 -05003523}
3524
/*
 * Write the current task's alternate-stack settings into the user-space
 * stack_t at @uss (used while building a signal frame), then honour
 * SS_AUTODISARM by clearing the kernel-side settings.
 *
 * Returns 0 on success or the failing __put_user() result.
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	/* Plain '|' (not ||): all three stores are attempted even on fault. */
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	/* Auto-disarm: one-shot altstack resets once it's been saved. */
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3537
#ifdef CONFIG_COMPAT
/*
 * Compat core of sigaltstack(): convert the 32-bit compat_stack_t
 * descriptors to/from the native stack_t and delegate to
 * do_sigaltstack().
 */
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		/* Widen each 32-bit field into the native stack_t. */
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			compat_user_stack_pointer());
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		/* Zero first so struct padding can't leak kernel data. */
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}
3566
/* Thin compat entry point; the work happens in do_compat_sigaltstack(). */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}
3573
Al Viro90268432012-12-14 14:47:53 -05003574int compat_restore_altstack(const compat_stack_t __user *uss)
3575{
Dominik Brodowski6203deb2018-03-17 17:11:51 +01003576 int err = do_compat_sigaltstack(uss, NULL);
Al Viro90268432012-12-14 14:47:53 -05003577 /* squash all but -EFAULT for now */
3578 return err == -EFAULT ? err : 0;
3579}
Al Viroc40702c2012-11-20 14:24:26 -05003580
/*
 * Compat counterpart of __save_altstack(): store the current task's
 * alternate-stack settings into a 32-bit compat_stack_t, then honour
 * SS_AUTODISARM.  Returns 0 or the failing __put_user() result.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	/* Plain '|' so every field store is attempted even after a fault. */
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	/*
	 * NOTE(review): both sides are sizeof(old_sigset_t), so this can
	 * never be true — presumably kept as a compile-time sanity check;
	 * confirm before removing.
	 */
	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	/* Only the low word of the full sigset_t fits in old_sigset_t. */
	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}
3617
#ifdef CONFIG_COMPAT
/* 32-bit variant: report only the low word of the pending set. */
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif
3630
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	/* Snapshot the old (low-word) mask before any modification. */
	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			/* Only the low word is replaced; high words are kept. */
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		/* Commit via the common helper (filters SIGKILL/SIGSTOP etc.). */
		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3681
Al Viroeaca6ea2012-11-25 23:12:10 -05003682#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003683/**
3684 * sys_rt_sigaction - alter an action taken by a process
3685 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003686 * @act: new sigaction
3687 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003688 * @sigsetsize: size of sigset_t type
3689 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003690SYSCALL_DEFINE4(rt_sigaction, int, sig,
3691 const struct sigaction __user *, act,
3692 struct sigaction __user *, oact,
3693 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694{
3695 struct k_sigaction new_sa, old_sa;
Christian Braunerd8f993b2018-08-21 22:00:07 -07003696 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697
3698 /* XXX: Don't preclude handling different sized sigset_t's. */
3699 if (sigsetsize != sizeof(sigset_t))
Christian Braunerd8f993b2018-08-21 22:00:07 -07003700 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701
Christian Braunerd8f993b2018-08-21 22:00:07 -07003702 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3703 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
3705 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
Christian Braunerd8f993b2018-08-21 22:00:07 -07003706 if (ret)
3707 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708
Christian Braunerd8f993b2018-08-21 22:00:07 -07003709 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3710 return -EFAULT;
3711
3712 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713}
#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigaction(): field-by-field translation between the 32-bit
 * struct compat_sigaction and the native k_sigaction, then delegation
 * to do_sigaction().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* '|=' deliberately accumulates errors across all accesses. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Translate the old action back into the 32-bit layout. */
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760
#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2) using struct old_sigaction (single-word mask).
 * access_ok() once, then unchecked __get_user/__put_user per field;
 * the '||' chain stops at the first faulting access.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Expand the single-word legacy mask into a full sigset_t. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat variant of the legacy sigaction(2): same single-word mask, but
 * handler/restorer arrive as 32-bit compat pointers and must be widened
 * with compat_ptr()/ptr_to_compat().
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok(), then unchecked per-field reads. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* Expand the single-word legacy mask into a full sigset_t. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	/* Only the low word of the blocked set is visible to this legacy ABI. */
	return current->blocked.sig[0];
}
3849
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003850SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003852 int old = current->blocked.sig[0];
3853 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003855 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003856 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857
3858 return old;
3859}
Fabian Frederickf6187762014-06-04 16:11:12 -07003860#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 * Installs @handler with historical SysV one-shot/no-mask semantics and
 * returns the previous handler (or a negative errno).
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction act, oact;
	int err;

	act.sa.sa_handler = handler;
	/* SysV semantics: handler resets to SIG_DFL and sig isn't masked. */
	act.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&act.sa.sa_mask);

	err = do_sigaction(sig, &act, &oact);
	if (err)
		return err;

	return (unsigned long)oact.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
3880
#ifdef __ARCH_WANT_SYS_PAUSE

/*
 * pause(2): sleep interruptibly until a signal is pending.  Always
 * returns -ERESTARTNOHAND so signal delivery decides the final errno.
 */
SYSCALL_DEFINE0(pause)
{
	for (;;) {
		if (signal_pending(current))
			break;
		/* Mark interruptible before schedule() to avoid a lost wakeup. */
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
3893
/*
 * Common body of the sigsuspend family: atomically swap in *@set as the
 * blocked mask and sleep until a signal is pending.  The previous mask
 * is stashed in ->saved_sigmask and restored on the signal-delivery
 * path via set_restore_sigmask().  Always returns -ERESTARTNOHAND.
 */
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		/* Set state before schedule() so a concurrent wakeup isn't lost. */
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04003906
Randy Dunlap41c57892011-04-04 15:00:26 -07003907/**
3908 * sys_rt_sigsuspend - replace the signal mask for a value with the
3909 * @unewset value until a signal is received
3910 * @unewset: new signal mask value
3911 * @sigsetsize: size of sigset_t type
3912 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003913SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003914{
3915 sigset_t newset;
3916
3917 /* XXX: Don't preclude handling different sized sigset_t's. */
3918 if (sigsetsize != sizeof(sigset_t))
3919 return -EINVAL;
3920
3921 if (copy_from_user(&newset, unewset, sizeof(newset)))
3922 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003923 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003924}
Al Viroad4b65a2012-12-24 21:43:56 -05003925
#ifdef CONFIG_COMPAT
/* 32-bit entry point: convert the compat sigset, then share sigsuspend(). */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t kset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&kset, unewset) != 0)
		return -EFAULT;

	return sigsuspend(&kset);
}
#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003940
#ifdef CONFIG_OLD_SIGSUSPEND
/* Legacy single-word sigsuspend(2). */
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t kset;

	siginitset(&kset, mask);
	return sigsuspend(&kset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
/* Three-argument legacy variant; the first two arguments are ignored. */
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t kset;

	siginitset(&kset, mask);
	return sigsuspend(&kset);
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
/* Weak default: architectures may override to give special VMAs a name. */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3962
/* Boot-time init: sanity-check siginfo layout and create the sigqueue cache. */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* SLAB_PANIC: failing to create this cache is fatal at boot. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	/* trylock only: kdb may have interrupted a holder of siglock. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	/* Track whether this is a repeated kill of the same task; a repeat
	 * is taken as the operator accepting the deadlock risk below. */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	/* siglock is held, so call the internal helper directly. */
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */