blob: d7d1adf735f44eaae535591e4af571f5d8946758 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010016#include <linux/sched/mm.h>
Ingo Molnar8703e8a2017-02-08 18:51:30 +010017#include <linux/sched/user.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010018#include <linux/sched/debug.h>
Ingo Molnar29930022017-02-08 18:51:36 +010019#include <linux/sched/task.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010020#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010021#include <linux/sched/cputime.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/fs.h>
23#include <linux/tty.h>
24#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070025#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/security.h>
27#include <linux/syscalls.h>
28#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070029#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070030#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090031#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070032#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080033#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080034#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080035#include <linux/pid_namespace.h>
36#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080037#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053038#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050039#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000040#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070041#include <linux/compiler.h>
Christoph Hellwig31ea70e2017-06-03 21:01:00 +020042#include <linux/posix-timers.h>
Miroslav Benes43347d52017-11-15 14:50:13 +010043#include <linux/livepatch.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070044
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050045#define CREATE_TRACE_POINTS
46#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/param.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unistd.h>
51#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010052#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040053#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55/*
56 * SLAB caches for signal bits.
57 */
58
Christoph Lametere18b8902006-12-06 20:33:20 -080059static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090061int print_fatal_signals __read_mostly;
62
/*
 * Return the userspace handler currently installed for @sig on task @t.
 * Caller must ensure t->sighand stays valid (e.g. hold siglock).
 */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	/* action[] is 0-indexed while signal numbers start at 1 */
	return t->sighand->action[sig - 1].sa.sa_handler;
}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070067
/*
 * Does @handler cause @sig to be discarded?  True for an explicit
 * SIG_IGN, or for SIG_DFL when the default disposition of @sig is
 * to ignore it.
 */
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
/*
 * Would @sig be ignored if delivered to @t right now, based on the
 * installed handler and the task's (un)killability?  When @force is
 * true, a kernel-only signal (SIGKILL/SIGSTOP) still gets through to
 * a SIGNAL_UNKILLABLE task.
 */
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	/*
	 * An unkillable (init-like) task ignores default-action signals,
	 * unless the sender forces a kernel-only signal.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}
91
/*
 * Should @sig be dropped instead of queued for @t?  Blocked and traced
 * signals are never dropped; otherwise defer to sig_task_ignored().
 */
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
112
113/*
114 * Re-calculate pending state from the set of locally pending
115 * signals, globally pending signals, and blocked signals.
116 */
Christian Brauner938696a2018-08-21 22:00:27 -0700117static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118{
119 unsigned long ready;
120 long i;
121
122 switch (_NSIG_WORDS) {
123 default:
124 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
125 ready |= signal->sig[i] &~ blocked->sig[i];
126 break;
127
128 case 4: ready = signal->sig[3] &~ blocked->sig[3];
129 ready |= signal->sig[2] &~ blocked->sig[2];
130 ready |= signal->sig[1] &~ blocked->sig[1];
131 ready |= signal->sig[0] &~ blocked->sig[0];
132 break;
133
134 case 2: ready = signal->sig[1] &~ blocked->sig[1];
135 ready |= signal->sig[0] &~ blocked->sig[0];
136 break;
137
138 case 1: ready = signal->sig[0] &~ blocked->sig[0];
139 }
140 return ready != 0;
141}
142
143#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
144
/*
 * Recompute TIF_SIGPENDING for @t.  Sets the flag and returns true when
 * @t has pending job control work or a deliverable (non-blocked) signal
 * in its private or the shared pending set; otherwise returns false
 * without touching the flag.
 */
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}
161
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	/* only wake @t if the flag was actually (re)set */
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
171
/*
 * Recompute TIF_SIGPENDING for current.  The flag is cleared only when
 * nothing is pending AND no other user of the flag (freezer, livepatch)
 * still needs it set.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);

}
179
/*
 * Run by a newly forked task: unconditionally set TIF_SIGPENDING, then
 * let recalc_sigpending() clear it again if nothing was actually delayed
 * across fork.  siglock keeps the pending sets stable meanwhile.
 */
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
190
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191/* Given the mask, find the first available signal that should be serviced. */
192
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800193#define SYNCHRONOUS_MASK \
194 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
Will Drewrya0727e82012-04-12 16:48:00 -0500195 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800196
/*
 * Find the lowest-numbered deliverable signal in @pending that is not
 * masked by @mask.  Synchronous signals (first word, SYNCHRONOUS_MASK)
 * are preferred over everything else.  Returns 0 if none is pending.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		/* narrow to synchronous signals if any are present */
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		/* ffz(~x) is the index of the lowest set bit in x */
		sig = ffz(~x) + 1;
		return sig;
	}

	/* scan the remaining words; unrolled for the common sizes */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
242
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900243static inline void print_dropped_signal(int sig)
244{
245 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
246
247 if (!print_fatal_signals)
248 return;
249
250 if (!__ratelimit(&ratelimit_state))
251 return;
252
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700253 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900254 current->comm, current->pid, sig);
255}
256
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100257/**
Tejun Heo7dd3db52011-06-02 11:14:00 +0200258 * task_set_jobctl_pending - set jobctl pending bits
259 * @task: target task
260 * @mask: pending bits to set
261 *
 * Set bits in @mask in @task->jobctl.  @mask must be subset of
263 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
264 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
265 * cleared. If @task is already being killed or exiting, this function
266 * becomes noop.
267 *
268 * CONTEXT:
269 * Must be called with @task->sighand->siglock held.
270 *
271 * RETURNS:
272 * %true if @mask is set, %false if made noop because @task was dying.
273 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	/* only pending/consume/sigmask/trapping bits may be set here */
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	/* TRAPPING is only meaningful together with a pending bit */
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	/* dying or exiting tasks take no new job control state */
	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* a new stop signo replaces any previously recorded one */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
289
290/**
Tejun Heoa8f072c2011-06-02 11:13:59 +0200291 * task_clear_jobctl_trapping - clear jobctl trapping bit
Tejun Heod79fdd62011-03-23 10:37:00 +0100292 * @task: target task
293 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200294 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
295 * Clear it and wake up the ptracer. Note that we don't need any further
296 * locking. @task->siglock guarantees that @task->parent points to the
297 * ptracer.
Tejun Heod79fdd62011-03-23 10:37:00 +0100298 *
299 * CONTEXT:
300 * Must be called with @task->sighand->siglock held.
301 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		/* wake whoever is waiting on the TRAPPING bit (presumably
		 * the ptracer — see kernel-doc above) */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
310
311/**
Tejun Heo3759a0d2011-06-02 11:14:00 +0200312 * task_clear_jobctl_pending - clear jobctl pending bits
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100313 * @task: target task
Tejun Heo3759a0d2011-06-02 11:14:00 +0200314 * @mask: pending bits to clear
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100315 *
Tejun Heo3759a0d2011-06-02 11:14:00 +0200316 * Clear @mask from @task->jobctl. @mask must be subset of
317 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
318 * STOP bits are cleared together.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100319 *
Tejun Heo6dfca322011-06-02 11:14:00 +0200320 * If clearing of @mask leaves no stop or trap pending, this function calls
321 * task_clear_jobctl_trapping().
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100322 *
323 * CONTEXT:
324 * Must be called with @task->sighand->siglock held.
325 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* clearing STOP_PENDING also invalidates CONSUME/DEQUEUED state */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	/* no stop or trap left pending -> TRAPPING can be dropped too */
	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
338
339/**
340 * task_participate_group_stop - participate in a group stop
341 * @task: task participating in a group stop
342 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200343 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
Tejun Heo39efa3e2011-03-23 10:37:00 +0100344 * Group stop states are cleared and the group stop count is consumed if
Tejun Heoa8f072c2011-06-02 11:13:59 +0200345 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
Tejun Heo39efa3e2011-03-23 10:37:00 +0100346 * stop, the appropriate %SIGNAL_* flags are set.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100347 *
348 * CONTEXT:
349 * Must be called with @task->sighand->siglock held.
Tejun Heo244056f2011-03-23 10:37:01 +0100350 *
351 * RETURNS:
352 * %true if group stop completion should be notified to the parent, %false
353 * otherwise.
Tejun Heoe5c1902e2011-03-23 10:37:00 +0100354 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* snapshot CONSUME before the pending bits are cleared below */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	/* this task's stop does not count against the group stop */
	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
380
/*
 * Make newly created @task take part in a group stop that current (its
 * creator) already has pending, copying current's stop signo and bumping
 * the group stop count on success.
 */
void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		/* count @task as one more stopper only if the bits stuck */
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
394
David Howellsc69e8d92008-11-14 10:39:19 +1100395/*
396 * allocate a new signal queue record
397 * - this may be called without locks if and only if t == current, otherwise an
Randy Dunlap5aba0852011-04-04 14:59:31 -0700398 * appropriate lock must be held to stop the target task from exiting
David Howellsc69e8d92008-11-14 10:39:19 +1100399 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	/* charge the pending signal to the sender's user before allocating */
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	/* allocate only while under RLIMIT_SIGPENDING, unless overridden */
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* allocation failed or rlimit hit: undo the accounting */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;		/* q now owns the uid reference */
	}

	return q;
}
434
/*
 * Release a sigqueue entry: drop the per-user pending count and uid
 * reference taken in __sigqueue_alloc(), then return it to the cache.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	/* preallocated entries are reused by their owner, never freed here */
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
443
/*
 * Empty @queue completely: clear the pending bitmap and free every
 * queued sigqueue entry.  Caller must hold the relevant siglock.
 */
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
455
456/*
Oleg Nesterov9e7c8f82015-06-04 16:22:16 -0400457 * Flush all pending signals for this kthread.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 */
/*
 * Discard every pending signal for @t — both the private and the shared
 * queue — and clear its TIF_SIGPENDING flag, all under siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
469
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500470#ifdef CONFIG_POSIX_TIMERS
/*
 * Remove all SI_TIMER entries from @pending while keeping every other
 * queued signal, then rebuild the pending bitmap so a signal number
 * stays set iff a non-timer entry with that number remains.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* non-timer entry: its signo must stay pending */
			sigaddset(&retain, sig);
		} else {
			/* timer entry: drop the bit and free the entry */
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* retained signos win over bits cleared above */
	sigorsets(&pending->signal, &signal, &retain);
}
493
/*
 * Drop all pending itimer (SI_TIMER) signals for the current task, in
 * both the private and the shared queue, under siglock.
 */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500504#endif
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400505
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700506void ignore_signals(struct task_struct *t)
507{
508 int i;
509
510 for (i = 0; i < _NSIG; ++i)
511 t->sighand->action[i].sa.sa_handler = SIG_IGN;
512
513 flush_signals(t);
514}
515
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517 * Flush all handlers for a task.
518 */
519
/*
 * Reset @t's signal dispositions.  Handlers are reset to SIG_DFL —
 * except SIG_IGN, which is preserved unless @force_default is set —
 * and flags, restorer and mask are cleared for every entry.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		/* keep an explicit SIG_IGN unless forced back to default */
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
536
/*
 * Would @sig go unhandled by @tsk?  Signals to global init are always
 * considered unhandled; a custom handler means handled; for SIG_IGN /
 * SIG_DFL a ptracer, if present, gets the final say.
 */
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
549
/*
 * Fill *@info for signal @sig being dequeued from @list and remove one
 * matching queue entry.  The pending bit for @sig is cleared only when
 * no second instance of the signal remains queued (the goto skips the
 * sigdelset).  *@resched_timer is set when the dequeued entry belongs
 * to a preallocated POSIX timer that must be rearmed by the caller.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		/* another instance may remain queued; bit stays set above */
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
594
/*
 * Dequeue the next deliverable signal from @pending that is not blocked
 * by @mask.  Returns the signal number (0 if none) and fills *@info /
 * *@resched_timer via collect_signal().
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
604
605/*
Randy Dunlap5aba0852011-04-04 14:59:31 -0700606 * Dequeue a signal and return the element to the caller, which is
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 * expected to free it.
608 *
609 * All callers have to hold the siglock.
610 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		/* nothing private pending — try the shared queue */
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	/* a signal was removed (or none was pending): refresh TIF_SIGPENDING */
	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
687
688/*
689 * Tell a process that it has a new active signal..
690 *
691 * NOTE! we rely on the previous spin_lock to
692 * lock interrupts for us! We can only be called with
693 * "siglock" held, and the local interrupt must
694 * have been disabled when that got acquired!
695 *
696 * No need to set need_resched since signal event passing
697 * goes through ->blocked
698 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);	/* already running: force a reschedule */
}
712
713/*
714 * Remove signals in mask from the pending set and queue.
715 * Returns 1 if any signals were found.
716 *
717 * All callers must be holding the siglock.
George Anzinger71fabd5e2006-01-08 01:02:48 -0800718 */
Christian Brauner8f113512018-08-21 22:00:38 -0700719static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
George Anzinger71fabd5e2006-01-08 01:02:48 -0800720{
721 struct sigqueue *q, *n;
722 sigset_t m;
723
724 sigandsets(&m, mask, &s->signal);
725 if (sigisemptyset(&m))
Christian Brauner8f113512018-08-21 22:00:38 -0700726 return;
George Anzinger71fabd5e2006-01-08 01:02:48 -0800727
Oleg Nesterov702a5072011-04-27 22:01:27 +0200728 sigandnsets(&s->signal, &s->signal, mask);
George Anzinger71fabd5e2006-01-08 01:02:48 -0800729 list_for_each_entry_safe(q, n, &s->list, list) {
730 if (sigismember(mask, q->info.si_signo)) {
731 list_del_init(&q->list);
732 __sigqueue_free(q);
733 }
734 }
George Anzinger71fabd5e2006-01-08 01:02:48 -0800735}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
Oleg Nesterov614c5172009-12-15 16:47:22 -0800737static inline int is_si_special(const struct siginfo *info)
738{
739 return info <= SEND_SIG_FORCED;
740}
741
742static inline bool si_fromuser(const struct siginfo *info)
743{
744 return info == SEND_SIG_NOINFO ||
745 (!is_si_special(info) && SI_FROMUSER(info));
746}
747
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700749 * called with RCU read lock from check_kill_permission()
750 */
Christian Brauner2a9b9092018-08-21 22:00:11 -0700751static bool kill_ok_by_cred(struct task_struct *t)
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700752{
753 const struct cred *cred = current_cred();
754 const struct cred *tcred = __task_cred(t);
755
Christian Brauner2a9b9092018-08-21 22:00:11 -0700756 return uid_eq(cred->euid, tcred->suid) ||
757 uid_eq(cred->euid, tcred->uid) ||
758 uid_eq(cred->uid, tcred->suid) ||
759 uid_eq(cred->uid, tcred->uid) ||
760 ns_capable(tcred->user_ns, CAP_KILL);
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700761}
762
763/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 * Bad permissions for sending the signal
David Howells694f6902010-08-04 16:59:14 +0100765 * - the caller must hold the RCU read lock
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-generated signals bypass all permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through: different session -> not allowed */
		default:
			return -EPERM;
		}
	}

	/* Finally let the LSM veto or allow the signal. */
	return security_task_kill(t, info, sig, NULL);
}
801
Tejun Heofb1d9102011-06-14 11:20:17 +0200802/**
803 * ptrace_trap_notify - schedule trap to notify ptracer
804 * @t: tracee wanting to notify tracer
805 *
806 * This function schedules sticky ptrace trap which is cleared on the next
807 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
808 * ptracer.
809 *
Tejun Heo544b2c92011-06-14 11:20:18 +0200810 * If @t is running, STOP trap will be taken. If trapped for STOP and
811 * ptracer is listening for events, tracee is woken up so that it can
812 * re-trap for the new event. If trapped otherwise, STOP trap will be
813 * eventually taken without returning to userland after the existing traps
814 * are finished by PTRACE_CONT.
Tejun Heofb1d9102011-06-14 11:20:17 +0200815 *
816 * CONTEXT:
817 * Must be called with @task->sighand->siglock held.
818 */
static void ptrace_trap_notify(struct task_struct *t)
{
	/* Only valid for PTRACE_SEIZE'd tracees, with siglock held. */
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* Wake the tracee out of a trap only if the ptracer is listening. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
827
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828/*
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700829 * Handle magic process-wide effects of stop/continue signals. Unlike
830 * the signal actions, these happen immediately at signal-generation
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 * time regardless of blocking, ignoring, or handling. This does the
832 * actual continuing for SIGCONT, but not the actual stopping for stop
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700833 * signals. The process stop is done as a signal action for SIG_DFL.
834 *
835 * Returns true if the signal should be actually delivered, otherwise
836 * it should be dropped.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 */
Oleg Nesterov403bad72013-04-30 15:28:10 -0700838static bool prepare_signal(int sig, struct task_struct *p, bool force)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839{
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700840 struct signal_struct *signal = p->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 struct task_struct *t;
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700842 sigset_t flush;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843
Oleg Nesterov403bad72013-04-30 15:28:10 -0700844 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
Oleg Nesterov5fa534c2015-11-06 16:32:31 -0800845 if (!(signal->flags & SIGNAL_GROUP_EXIT))
Oleg Nesterov403bad72013-04-30 15:28:10 -0700846 return sig == SIGKILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 /*
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700848 * The process is in the middle of dying, nothing to do.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 */
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700850 } else if (sig_kernel_stop(sig)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 /*
852 * This is a stop signal. Remove SIGCONT from all queues.
853 */
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700854 siginitset(&flush, sigmask(SIGCONT));
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700855 flush_sigqueue_mask(&flush, &signal->shared_pending);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700856 for_each_thread(p, t)
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700857 flush_sigqueue_mask(&flush, &t->pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 } else if (sig == SIGCONT) {
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700859 unsigned int why;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 /*
Oleg Nesterov1deac632011-04-01 20:11:50 +0200861 * Remove all stop signals from all queues, wake all threads.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 */
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700863 siginitset(&flush, SIG_KERNEL_STOP_MASK);
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700864 flush_sigqueue_mask(&flush, &signal->shared_pending);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700865 for_each_thread(p, t) {
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700866 flush_sigqueue_mask(&flush, &t->pending);
Tejun Heo3759a0d2011-06-02 11:14:00 +0200867 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
Tejun Heofb1d9102011-06-14 11:20:17 +0200868 if (likely(!(t->ptrace & PT_SEIZED)))
869 wake_up_state(t, __TASK_STOPPED);
870 else
871 ptrace_trap_notify(t);
Oleg Nesterov9490592f2014-06-06 14:36:48 -0700872 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700874 /*
875 * Notify the parent with CLD_CONTINUED if we were stopped.
876 *
877 * If we were in the middle of a group stop, we pretend it
878 * was already finished, and then continued. Since SIGCHLD
879 * doesn't queue we report only CLD_STOPPED, as if the next
880 * CLD_CONTINUED was dropped.
881 */
882 why = 0;
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700883 if (signal->flags & SIGNAL_STOP_STOPPED)
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700884 why |= SIGNAL_CLD_CONTINUED;
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700885 else if (signal->group_stop_count)
Oleg Nesterovfc321d22008-04-30 00:52:46 -0700886 why |= SIGNAL_CLD_STOPPED;
887
888 if (why) {
Oleg Nesterov021e1ae2008-04-30 00:53:00 -0700889 /*
Roland McGrathae6d2ed2009-09-23 15:56:53 -0700890 * The first thread which returns from do_signal_stop()
Oleg Nesterov021e1ae2008-04-30 00:53:00 -0700891 * will take ->siglock, notice SIGNAL_CLD_MASK, and
892 * notify its parent. See get_signal_to_deliver().
893 */
Jamie Iles2d39b3c2017-01-10 16:57:54 -0800894 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
Oleg Nesterovad16a4602008-04-30 00:52:46 -0700895 signal->group_stop_count = 0;
896 signal->group_exit_code = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898 }
Oleg Nesterov7e695a52008-04-30 00:52:59 -0700899
Oleg Nesterovdef8cf72012-03-23 15:02:45 -0700900 return !sig_ignored(p, sig, force);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901}
902
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700903/*
904 * Test if P wants to take SIG. After we've checked all threads with this,
905 * it's equivalent to finding no threads not blocking SIG. Any threads not
906 * blocking SIG were ruled out because they are not running and already
907 * have pending signals. Such threads will dequeue from the shared queue
908 * as soon as they're available, so putting the signal on the shared queue
909 * will be equivalent to sending it to one such thread.
910 */
Christian Brauneracd14e62018-08-21 22:00:42 -0700911static inline bool wants_signal(int sig, struct task_struct *p)
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700912{
913 if (sigismember(&p->blocked, sig))
Christian Brauneracd14e62018-08-21 22:00:42 -0700914 return false;
915
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700916 if (p->flags & PF_EXITING)
Christian Brauneracd14e62018-08-21 22:00:42 -0700917 return false;
918
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700919 if (sig == SIGKILL)
Christian Brauneracd14e62018-08-21 22:00:42 -0700920 return true;
921
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700922 if (task_is_stopped_or_traced(p))
Christian Brauneracd14e62018-08-21 22:00:42 -0700923 return false;
924
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700925 return task_curr(p) || !signal_pending(p);
926}
927
Eric W. Biederman07296142018-07-13 21:39:13 -0500928static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700929{
930 struct signal_struct *signal = p->signal;
931 struct task_struct *t;
932
933 /*
934 * Now find a thread we can wake up to take the signal off the queue.
935 *
936 * If the main thread wants the signal, it gets first crack.
937 * Probably the least surprising to the average bear.
938 */
939 if (wants_signal(sig, p))
940 t = p;
Eric W. Biederman07296142018-07-13 21:39:13 -0500941 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700942 /*
943 * There is just one thread and it does not need to be woken.
944 * It will dequeue unblocked signals before it runs again.
945 */
946 return;
947 else {
948 /*
949 * Otherwise try to find a suitable thread.
950 */
951 t = signal->curr_target;
952 while (!wants_signal(sig, t)) {
953 t = next_thread(t);
954 if (t == signal->curr_target)
955 /*
956 * No thread needs to be woken.
957 * Any eligible threads will see
958 * the signal in the queue soon.
959 */
960 return;
961 }
962 signal->curr_target = t;
963 }
964
965 /*
966 * Found a killable thread. If the signal will be fatal,
967 * then start taking the whole group down immediately.
968 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -0700969 if (sig_fatal(p, sig) &&
Oleg Nesterov42691572017-11-17 15:30:08 -0800970 !(signal->flags & SIGNAL_GROUP_EXIT) &&
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700971 !sigismember(&t->real_blocked, sig) &&
Oleg Nesterov42691572017-11-17 15:30:08 -0800972 (sig == SIGKILL || !p->ptrace)) {
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700973 /*
974 * This signal will be fatal to the whole group.
975 */
976 if (!sig_kernel_coredump(sig)) {
977 /*
978 * Start a group exit and wake everybody up.
979 * This way we don't have other threads
980 * running and doing things after a slower
981 * thread has the fatal signal pending.
982 */
983 signal->flags = SIGNAL_GROUP_EXIT;
984 signal->group_exit_code = sig;
985 signal->group_stop_count = 0;
986 t = p;
987 do {
Tejun Heo6dfca322011-06-02 11:14:00 +0200988 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700989 sigaddset(&t->pending.signal, SIGKILL);
990 signal_wake_up(t, 1);
991 } while_each_thread(p, t);
992 return;
993 }
994 }
995
996 /*
997 * The signal is already in the shared-pending queue.
998 * Tell the chosen thread to wake up and dequeue it.
999 */
1000 signal_wake_up(t, sig == SIGKILL);
1001 return;
1002}
1003
Christian Braunera19e2c02018-08-21 22:00:46 -07001004static inline bool legacy_queue(struct sigpending *signals, int sig)
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001005{
1006 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1007}
1008
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001009#ifdef CONFIG_USER_NS
1010static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1011{
1012 if (current_user_ns() == task_cred_xxx(t, user_ns))
1013 return;
1014
1015 if (SI_FROMKERNEL(info))
1016 return;
1017
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001018 rcu_read_lock();
1019 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1020 make_kuid(current_user_ns(), info->si_uid));
1021 rcu_read_unlock();
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001022}
1023#else
1024static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1025{
1026 return;
1027}
1028#endif
1029
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001030static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
Eric W. Biederman5a883ce2018-07-13 19:26:27 -05001031 enum pid_type type, int from_ancestor_ns)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032{
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001033 struct sigpending *pending;
Oleg Nesterov6e65acb2008-04-30 00:52:50 -07001034 struct sigqueue *q;
Vegard Nossum7a0aeb12009-05-16 11:28:33 +02001035 int override_rlimit;
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001036 int ret = 0, result;
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04001037
Oleg Nesterov6e65acb2008-04-30 00:52:50 -07001038 assert_spin_locked(&t->sighand->siglock);
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001039
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001040 result = TRACE_SIGNAL_IGNORED;
Oleg Nesterov629d3622012-03-23 15:02:44 -07001041 if (!prepare_signal(sig, t,
Eric W. Biederman3597dfe2018-09-03 20:02:46 +02001042 from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001043 goto ret;
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001044
Eric W. Biederman5a883ce2018-07-13 19:26:27 -05001045 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046 /*
Pavel Emelyanov2acb0242008-04-30 00:52:35 -07001047 * Short-circuit ignored signals and support queuing
1048 * exactly one non-rt signal, so that we can get more
1049 * detailed information about the cause of the signal.
1050 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001051 result = TRACE_SIGNAL_ALREADY_PENDING;
Oleg Nesterov7e695a52008-04-30 00:52:59 -07001052 if (legacy_queue(pending, sig))
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001053 goto ret;
1054
1055 result = TRACE_SIGNAL_DELIVERED;
Davide Libenzifba2afa2007-05-10 22:23:13 -07001056 /*
Eric W. Biedermanf149b312018-09-03 09:50:36 +02001057 * Skip useless siginfo allocation for SIGKILL SIGSTOP,
1058 * and kernel threads.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 */
Eric W. Biedermanf149b312018-09-03 09:50:36 +02001060 if ((info == SEND_SIG_FORCED) ||
1061 sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 goto out_set;
1063
Randy Dunlap5aba0852011-04-04 14:59:31 -07001064 /*
1065 * Real-time signals must be queued if sent by sigqueue, or
1066 * some other real-time mechanism. It is implementation
1067 * defined whether kill() does so. We attempt to do so, on
1068 * the principle of least surprise, but since kill is not
1069 * allowed to fail with EAGAIN when low on memory we just
1070 * make sure at least one signal gets delivered and don't
1071 * pass on the info struct.
1072 */
Vegard Nossum7a0aeb12009-05-16 11:28:33 +02001073 if (sig < SIGRTMIN)
1074 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1075 else
1076 override_rlimit = 0;
1077
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08001078 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 if (q) {
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001080 list_add_tail(&q->list, &pending->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 switch ((unsigned long) info) {
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001082 case (unsigned long) SEND_SIG_NOINFO:
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06001083 clear_siginfo(&q->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 q->info.si_signo = sig;
1085 q->info.si_errno = 0;
1086 q->info.si_code = SI_USER;
Sukadev Bhattiprolu9cd4fd12009-01-06 14:42:46 -08001087 q->info.si_pid = task_tgid_nr_ns(current,
Sukadev Bhattiprolu09bca052009-01-06 14:42:45 -08001088 task_active_pid_ns(t));
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001089 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090 break;
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001091 case (unsigned long) SEND_SIG_PRIV:
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06001092 clear_siginfo(&q->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 q->info.si_signo = sig;
1094 q->info.si_errno = 0;
1095 q->info.si_code = SI_KERNEL;
1096 q->info.si_pid = 0;
1097 q->info.si_uid = 0;
1098 break;
1099 default:
1100 copy_siginfo(&q->info, info);
Sukadev Bhattiprolu6588c1e2009-04-02 16:58:09 -07001101 if (from_ancestor_ns)
1102 q->info.si_pid = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 break;
1104 }
Serge E. Hallyn6b550f92012-01-10 15:11:37 -08001105
1106 userns_fixup_signal_uid(&q->info, t);
1107
Oleg Nesterov621d3122005-10-30 15:03:45 -08001108 } else if (!is_si_special(info)) {
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001109 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1110 /*
1111 * Queue overflow, abort. We may abort if the
1112 * signal was rt and sent by user using something
1113 * other than kill().
1114 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001115 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1116 ret = -EAGAIN;
1117 goto ret;
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001118 } else {
1119 /*
1120 * This is a silent loss of information. We still
1121 * send the signal, but the *info bits are lost.
1122 */
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001123 result = TRACE_SIGNAL_LOSE_INFO;
Masami Hiramatsuba005e12009-11-24 16:56:58 -05001124 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 }
1126
1127out_set:
Oleg Nesterov53c30332008-04-30 00:53:00 -07001128 signalfd_notify(t, sig);
Oleg Nesterov2ca35152008-04-30 00:52:54 -07001129 sigaddset(&pending->signal, sig);
Eric W. Biedermanc3ad2c32018-07-23 15:20:37 -05001130
1131 /* Let multiprocess signals appear after on-going forks */
1132 if (type > PIDTYPE_TGID) {
1133 struct multiprocess_signals *delayed;
1134 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1135 sigset_t *signal = &delayed->signal;
1136 /* Can't queue both a stop and a continue signal */
1137 if (sig == SIGCONT)
1138 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1139 else if (sig_kernel_stop(sig))
1140 sigdelset(signal, SIGCONT);
1141 sigaddset(signal, sig);
1142 }
1143 }
1144
Eric W. Biederman07296142018-07-13 21:39:13 -05001145 complete_signal(sig, t, type);
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001146ret:
Eric W. Biederman5a883ce2018-07-13 19:26:27 -05001147 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
Oleg Nesterov6c303d32011-11-22 21:13:48 +01001148 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149}
1150
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001151static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001152 enum pid_type type)
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001153{
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001154 int from_ancestor_ns = 0;
1155
1156#ifdef CONFIG_PID_NS
Oleg Nesterovdd342002009-12-15 16:47:24 -08001157 from_ancestor_ns = si_fromuser(info) &&
1158 !task_pid_nr_ns(current, task_active_pid_ns(t));
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -07001159#endif
1160
Eric W. Biederman5a883ce2018-07-13 19:26:27 -05001161 return __send_signal(sig, info, t, type, from_ancestor_ns);
Sukadev Bhattiprolu7978b562009-04-02 16:58:04 -07001162}
1163
Al Viro4aaefee2012-11-05 13:09:56 -05001164static void print_fatal_signal(int signr)
Ingo Molnar45807a12007-07-15 23:40:10 -07001165{
Al Viro4aaefee2012-11-05 13:09:56 -05001166 struct pt_regs *regs = signal_pt_regs();
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001167 pr_info("potentially unexpected fatal signal %d.\n", signr);
Ingo Molnar45807a12007-07-15 23:40:10 -07001168
Al Viroca5cd872007-10-29 04:31:16 +00001169#if defined(__i386__) && !defined(__arch_um__)
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001170 pr_info("code at %08lx: ", regs->ip);
Ingo Molnar45807a12007-07-15 23:40:10 -07001171 {
1172 int i;
1173 for (i = 0; i < 16; i++) {
1174 unsigned char insn;
1175
Andi Kleenb45c6e72010-01-08 14:42:52 -08001176 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1177 break;
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001178 pr_cont("%02x ", insn);
Ingo Molnar45807a12007-07-15 23:40:10 -07001179 }
1180 }
Wang Xiaoqiang747800e2016-05-23 16:23:59 -07001181 pr_cont("\n");
Ingo Molnar45807a12007-07-15 23:40:10 -07001182#endif
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001183 preempt_disable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001184 show_regs(regs);
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001185 preempt_enable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001186}
1187
/* Parse the "print-fatal-signals=" boot parameter into print_fatal_signals. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001197int
1198__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1199{
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001200 return send_signal(sig, info, p, PIDTYPE_TGID);
Pavel Emelyanov4cd4b6d2008-04-30 00:52:55 -07001201}
1202
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203static int
1204specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1205{
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001206 return send_signal(sig, info, t, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207}
1208
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001209int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001210 enum pid_type type)
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001211{
1212 unsigned long flags;
1213 int ret = -ESRCH;
1214
1215 if (lock_task_sighand(p, &flags)) {
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001216 ret = send_signal(sig, info, p, type);
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001217 unlock_task_sighand(p, &flags);
1218 }
1219
1220 return ret;
1221}
1222
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223/*
1224 * Force a signal that the process can't ignore: if necessary
1225 * we unblock the signal and change any SIG_IGN to SIG_DFL.
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001226 *
1227 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1228 * since we do not want to have a signal handler that was blocked
1229 * be invoked when user space had explicitly blocked it.
1230 *
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001231 * We don't want to have recursive SIGSEGV's etc, for example,
1232 * that is why we also clear SIGNAL_UNKILLABLE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234int
1235force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1236{
1237 unsigned long int flags;
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001238 int ret, blocked, ignored;
1239 struct k_sigaction *action;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
1241 spin_lock_irqsave(&t->sighand->siglock, flags);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001242 action = &t->sighand->action[sig-1];
1243 ignored = action->sa.sa_handler == SIG_IGN;
1244 blocked = sigismember(&t->blocked, sig);
1245 if (blocked || ignored) {
1246 action->sa.sa_handler = SIG_DFL;
1247 if (blocked) {
1248 sigdelset(&t->blocked, sig);
Roland McGrath7bb44ad2007-05-23 13:57:44 -07001249 recalc_sigpending_and_wake(t);
Linus Torvaldsae74c3b2006-08-02 20:17:49 -07001250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 }
Jamie Ileseb61b592017-08-18 15:16:18 -07001252 /*
1253 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1254 * debugging to leave init killable.
1255 */
1256 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
Oleg Nesterov80fe7282008-04-30 00:53:05 -07001257 t->signal->flags &= ~SIGNAL_UNKILLABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 ret = specific_send_sig_info(sig, info, t);
1259 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1260
1261 return ret;
1262}
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264/*
1265 * Nuke all other threads in the group.
1266 */
Oleg Nesterov09faef12010-05-26 14:43:11 -07001267int zap_other_threads(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268{
Oleg Nesterov09faef12010-05-26 14:43:11 -07001269 struct task_struct *t = p;
1270 int count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 p->signal->group_stop_count = 0;
1273
Oleg Nesterov09faef12010-05-26 14:43:11 -07001274 while_each_thread(p, t) {
Tejun Heo6dfca322011-06-02 11:14:00 +02001275 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
Oleg Nesterov09faef12010-05-26 14:43:11 -07001276 count++;
1277
1278 /* Don't bother with already dead threads */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 if (t->exit_state)
1280 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 sigaddset(&t->pending.signal, SIGKILL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 signal_wake_up(t, 1);
1283 }
Oleg Nesterov09faef12010-05-26 14:43:11 -07001284
1285 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286}
1287
Namhyung Kimb8ed3742010-10-27 15:34:06 -07001288struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1289 unsigned long *flags)
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001290{
1291 struct sighand_struct *sighand;
1292
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001293 rcu_read_lock();
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001294 for (;;) {
1295 sighand = rcu_dereference(tsk->sighand);
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001296 if (unlikely(sighand == NULL))
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001297 break;
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001298
Oleg Nesterov392809b2014-09-28 23:44:18 +02001299 /*
1300 * This sighand can be already freed and even reused, but
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001301 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
Oleg Nesterov392809b2014-09-28 23:44:18 +02001302 * initializes ->siglock: this slab can't go away, it has
1303 * the same object type, ->siglock can't be reinitialized.
1304 *
1305 * We need to ensure that tsk->sighand is still the same
1306 * after we take the lock, we can race with de_thread() or
1307 * __exit_signal(). In the latter case the next iteration
1308 * must see ->sighand == NULL.
1309 */
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001310 spin_lock_irqsave(&sighand->siglock, *flags);
1311 if (likely(sighand == tsk->sighand))
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001312 break;
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001313 spin_unlock_irqrestore(&sighand->siglock, *flags);
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001314 }
Anna-Maria Gleixner59dc6f32018-05-25 11:05:07 +02001315 rcu_read_unlock();
Oleg Nesterovf63ee722006-03-28 16:11:13 -08001316
1317 return sighand;
1318}
1319
David Howellsc69e8d92008-11-14 10:39:19 +11001320/*
1321 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001322 */
Eric W. Biederman01024982018-07-13 18:40:57 -05001323int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1324 enum pid_type type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325{
David Howells694f6902010-08-04 16:59:14 +01001326 int ret;
1327
1328 rcu_read_lock();
1329 ret = check_kill_permission(sig, info, p);
1330 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001332 if (!ret && sig)
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001333 ret = do_send_sig_info(sig, info, p, type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
1335 return ret;
1336}
1337
1338/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001339 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001341 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001343int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
1345 struct task_struct *p = NULL;
1346 int retval, success;
1347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 success = 0;
1349 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001350 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Eric W. Biederman01024982018-07-13 18:40:57 -05001351 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 success |= !err;
1353 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001354 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 return success ? 0 : retval;
1356}
1357
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001358int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359{
Oleg Nesterovd36174b2008-02-08 04:19:18 -08001360 int error = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 struct task_struct *p;
1362
Paul E. McKenneyeca1a082014-10-23 11:41:22 -07001363 for (;;) {
1364 rcu_read_lock();
1365 p = pid_task(pid, PIDTYPE_PID);
1366 if (p)
Eric W. Biederman01024982018-07-13 18:40:57 -05001367 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
Paul E. McKenneyeca1a082014-10-23 11:41:22 -07001368 rcu_read_unlock();
1369 if (likely(!p || error != -ESRCH))
1370 return error;
Oleg Nesterov6ca25b52008-04-30 00:52:45 -07001371
Paul E. McKenneyeca1a082014-10-23 11:41:22 -07001372 /*
1373 * The task was unhashed in between, try again. If it
1374 * is dead, pid_task() will return NULL, if we race with
1375 * de_thread() it will find the new leader.
1376 */
1377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
1379
Eric W. Biederman6c478ae2017-04-17 22:10:04 -05001380static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001381{
1382 int error;
1383 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001384 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001385 rcu_read_unlock();
1386 return error;
1387}
1388
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001389static inline bool kill_as_cred_perm(const struct cred *cred,
1390 struct task_struct *target)
Serge Hallynd178bc32011-09-26 10:45:18 -05001391{
1392 const struct cred *pcred = __task_cred(target);
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001393
1394 return uid_eq(cred->euid, pcred->suid) ||
1395 uid_eq(cred->euid, pcred->uid) ||
1396 uid_eq(cred->uid, pcred->suid) ||
1397 uid_eq(cred->uid, pcred->uid);
Serge Hallynd178bc32011-09-26 10:45:18 -05001398}
1399
/*
 * like kill_pid_info(), but doesn't use uid/euid of "current"
 *
 * Permission is checked against the caller-supplied @cred instead, so this
 * can be used to deliver a signal on behalf of another identity.  Returns
 * 0 on success, -EINVAL for a bad signal number, -ESRCH if the pid is gone,
 * -EPERM if @cred may not signal the target, or an LSM error.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	/* RCU keeps the task from being freed while we look it up and signal it */
	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* only user-originated siginfo is subject to the credential check */
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	/* sig == 0 is a pure permission probe; nothing to deliver */
	if (sig) {
		/* lock_task_sighand() fails only if the task is already dead */
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 *
 * pid > 0  : signal that process (thread group)
 * pid == 0 : signal the caller's process group
 * pid < -1 : signal the process group -pid
 * pid == -1: signal everything except init (vpid 1) and our own group
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		/* single target: no tasklist_lock needed, RCU suffices */
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0 means our own process group */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* skip init and our own thread group, per kill(2) */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				/* -EPERM on one victim doesn't fail the call */
				if (err != -EPERM)
					retval = err;
			}
		}
		/* -ESRCH only if nobody at all was eligible */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1484
1485/*
1486 * These are for backward compatibility with the rest of the kernel source.
1487 */
1488
Randy Dunlap5aba0852011-04-04 14:59:31 -07001489int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 /*
1492 * Make sure legacy kernel users don't send in bad values
1493 * (normal paths check this in check_kill_permission).
1494 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001495 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 return -EINVAL;
1497
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001498 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499}
1500
/* Map the send_sig() "priv" flag onto the special siginfo sentinel pointers */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1503
/* Convenience wrapper: @priv selects kernel-internal vs. no-info siginfo. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *info = __si_special(priv);

	return send_sig_info(sig, info, p);
}
1509
Christian Brauner52cba1a2018-08-21 21:59:51 -07001510void force_sig(int sig, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001512 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513}
1514
1515/*
1516 * When things go south during signal handling, we
1517 * will force a SIGSEGV. And if the signal that caused
1518 * the problem was already a SIGSEGV, we'll want to
1519 * make sure we don't even try to deliver the signal..
1520 */
Christian Brauner52cba1a2018-08-21 21:59:51 -07001521void force_sigsegv(int sig, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522{
1523 if (sig == SIGSEGV) {
1524 unsigned long flags;
1525 spin_lock_irqsave(&p->sighand->siglock, flags);
1526 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1527 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1528 }
1529 force_sig(SIGSEGV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530}
1531
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001532int force_sig_fault(int sig, int code, void __user *addr
1533 ___ARCH_SI_TRAPNO(int trapno)
1534 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1535 , struct task_struct *t)
1536{
1537 struct siginfo info;
1538
1539 clear_siginfo(&info);
1540 info.si_signo = sig;
1541 info.si_errno = 0;
1542 info.si_code = code;
1543 info.si_addr = addr;
1544#ifdef __ARCH_SI_TRAPNO
1545 info.si_trapno = trapno;
1546#endif
1547#ifdef __ia64__
1548 info.si_imm = imm;
1549 info.si_flags = flags;
1550 info.si_isr = isr;
1551#endif
1552 return force_sig_info(info.si_signo, &info, t);
1553}
1554
1555int send_sig_fault(int sig, int code, void __user *addr
1556 ___ARCH_SI_TRAPNO(int trapno)
1557 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1558 , struct task_struct *t)
1559{
1560 struct siginfo info;
1561
1562 clear_siginfo(&info);
1563 info.si_signo = sig;
1564 info.si_errno = 0;
1565 info.si_code = code;
1566 info.si_addr = addr;
1567#ifdef __ARCH_SI_TRAPNO
1568 info.si_trapno = trapno;
1569#endif
1570#ifdef __ia64__
1571 info.si_imm = imm;
1572 info.si_flags = flags;
1573 info.si_isr = isr;
1574#endif
1575 return send_sig_info(info.si_signo, &info, t);
1576}
1577
Eric W. Biederman38246732018-01-18 18:54:31 -06001578int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1579{
1580 struct siginfo info;
1581
1582 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1583 clear_siginfo(&info);
1584 info.si_signo = SIGBUS;
1585 info.si_errno = 0;
1586 info.si_code = code;
1587 info.si_addr = addr;
1588 info.si_addr_lsb = lsb;
1589 return force_sig_info(info.si_signo, &info, t);
1590}
1591
1592int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1593{
1594 struct siginfo info;
1595
1596 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1597 clear_siginfo(&info);
1598 info.si_signo = SIGBUS;
1599 info.si_errno = 0;
1600 info.si_code = code;
1601 info.si_addr = addr;
1602 info.si_addr_lsb = lsb;
1603 return send_sig_info(info.si_signo, &info, t);
1604}
1605EXPORT_SYMBOL(send_sig_mceerr);
Eric W. Biederman38246732018-01-18 18:54:31 -06001606
Eric W. Biederman38246732018-01-18 18:54:31 -06001607int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1608{
1609 struct siginfo info;
1610
1611 clear_siginfo(&info);
1612 info.si_signo = SIGSEGV;
1613 info.si_errno = 0;
1614 info.si_code = SEGV_BNDERR;
1615 info.si_addr = addr;
1616 info.si_lower = lower;
1617 info.si_upper = upper;
1618 return force_sig_info(info.si_signo, &info, current);
1619}
Eric W. Biederman38246732018-01-18 18:54:31 -06001620
#ifdef SEGV_PKUERR
/* Force a SIGSEGV carrying the protection key that caused the fault. */
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo si;

	clear_siginfo(&si);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SEGV_PKUERR;
	si.si_addr = addr;
	si.si_pkey = pkey;
	return force_sig_info(si.si_signo, &si, current);
}
#endif
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001635
Eric W. Biedermanf71dd7d2018-01-22 14:37:25 -06001636/* For the crazy architectures that include trap information in
1637 * the errno field, instead of an actual errno value.
1638 */
1639int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1640{
1641 struct siginfo info;
1642
1643 clear_siginfo(&info);
1644 info.si_signo = SIGTRAP;
1645 info.si_errno = errno;
1646 info.si_code = TRAP_HWBKPT;
1647 info.si_addr = addr;
1648 return force_sig_info(info.si_signo, &info, current);
1649}
1650
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001651int kill_pgrp(struct pid *pid, int sig, int priv)
1652{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001653 int ret;
1654
1655 read_lock(&tasklist_lock);
1656 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1657 read_unlock(&tasklist_lock);
1658
1659 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001660}
1661EXPORT_SYMBOL(kill_pgrp);
1662
/* Send @sig to the process @pid; @priv selects a kernel-internal siginfo. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1668
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669/*
1670 * These functions support sending signals using preallocated sigqueue
1671 * structures. This is needed "because realtime applications cannot
1672 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001673 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 * we allocate the sigqueue structure from the timer_create. If this
1675 * allocation fails we are able to report the failure to the application
1676 * with an EAGAIN error.
1677 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678struct sigqueue *sigqueue_alloc(void)
1679{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001680 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001682 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001684
1685 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686}
1687
/*
 * Release a preallocated sigqueue.  If the entry is currently queued on a
 * pending list we only strip SIGQUEUE_PREALLOC and let the eventual dequeue
 * free it; otherwise we free it here.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1712
/*
 * Queue a preallocated sigqueue (POSIX timer path) against the pid/type
 * target.  Returns 0 if queued, 1 if the signal is ignored, -1 if the
 * target is gone.  If the entry is already queued (SI_TIMER re-fire) we
 * only bump the overrun count instead of queueing twice.
 */
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	/* RCU pins the task while we look it up and take its sighand lock */
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	/* per-thread signals go to t->pending, group-wide to shared_pending */
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1760
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 *
 * Caller context: see the tasklist_lock comment below; @tsk must be an
 * exiting group leader (or ptraced thread), never stopped/traced.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead.  */
 	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* report cumulative times for the whole (now empty) thread group */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	/* decode exit_code: 0x80 = core dumped, low 7 bits = killing signal */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1858
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		/* job-control notifications always come from the group leader */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	/* si_status carries the stop/trap signal, or SIGCONT for CLD_CONTINUED */
 	info.si_code = why;
 	switch (why) {
 	case CLD_CONTINUED:
 		info.si_status = SIGCONT;
 		break;
 	case CLD_STOPPED:
 		info.si_status = tsk->signal->group_exit_code & 0x7f;
 		break;
 	case CLD_TRAPPED:
 		info.si_status = tsk->exit_code & 0x7f;
 		break;
 	default:
 		BUG();
 	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* SIGCHLD is suppressed when ignored or when SA_NOCLDSTOP is set */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1929
Christian Brauner6527de92018-08-21 21:59:59 -07001930static inline bool may_ptrace_stop(void)
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001931{
Tejun Heod21142e2011-06-17 16:50:34 +02001932 if (!likely(current->ptrace))
Christian Brauner6527de92018-08-21 21:59:59 -07001933 return false;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001934 /*
1935 * Are we in the middle of do_coredump?
1936 * If so and our tracer is also part of the coredump stopping
1937 * is a deadlock situation, and pointless because our tracer
1938 * is dead so don't allow us to stop.
1939 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001940 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001941 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01001942 *
1943 * This is almost outdated, a task with the pending SIGKILL can't
1944 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1945 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001946 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001947 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001948 unlikely(current->mm == current->parent->mm))
Christian Brauner6527de92018-08-21 21:59:59 -07001949 return false;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001950
Christian Brauner6527de92018-08-21 21:59:59 -07001951 return true;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001952}
1953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07001955 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08001956 * Called with the siglock held.
1957 */
Christian Braunerf99e9d82018-08-21 22:00:50 -07001958static bool sigkill_pending(struct task_struct *tsk)
Roland McGrath1a669c22008-02-06 01:37:37 -08001959{
Christian Braunerf99e9d82018-08-21 22:00:50 -07001960 return sigismember(&tsk->pending.signal, SIGKILL) ||
1961 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001962}
1963
1964/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 * This must be called with current->sighand->siglock held.
1966 *
1967 * This should be the path for all ptrace stops.
1968 * We always set current->last_siginfo while stopped here.
1969 * That makes it a way to test a stopped process for
1970 * being ptrace-stopped vs being job-control-stopped.
1971 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001972 * If we actually decide not to stop at all because the tracer
1973 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001975static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
Namhyung Kimb8401152010-10-27 15:34:07 -07001976 __releases(&current->sighand->siglock)
1977 __acquires(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978{
Tejun Heoceb6bd62011-03-23 10:37:01 +01001979 bool gstop_done = false;
1980
Roland McGrath1a669c22008-02-06 01:37:37 -08001981 if (arch_ptrace_stop_needed(exit_code, info)) {
1982 /*
1983 * The arch code has something special to do before a
1984 * ptrace stop. This is allowed to block, e.g. for faults
1985 * on user stack pages. We can't keep the siglock while
1986 * calling arch_ptrace_stop, so we must release it now.
1987 * To preserve proper semantics, we must do this before
1988 * any signal bookkeeping like checking group_stop_count.
1989 * Meanwhile, a SIGKILL could come in before we retake the
1990 * siglock. That must prevent us from sleeping in TASK_TRACED.
1991 * So after regaining the lock, we must check for SIGKILL.
1992 */
1993 spin_unlock_irq(&current->sighand->siglock);
1994 arch_ptrace_stop(exit_code, info);
1995 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001996 if (sigkill_pending(current))
1997 return;
Roland McGrath1a669c22008-02-06 01:37:37 -08001998 }
1999
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02002000 set_special_state(TASK_TRACED);
2001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 /*
Tejun Heo81be24b2011-06-02 11:13:59 +02002003 * We're committing to trapping. TRACED should be visible before
2004 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2005 * Also, transition to TRACED and updates to ->jobctl should be
2006 * atomic with respect to siglock and should be done after the arch
2007 * hook as siglock is released and regrabbed across it.
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02002008 *
2009 * TRACER TRACEE
2010 *
2011 * ptrace_attach()
2012 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2013 * do_wait()
2014 * set_current_state() smp_wmb();
2015 * ptrace_do_wait()
2016 * wait_task_stopped()
2017 * task_stopped_code()
2018 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 */
Peter Zijlstrab5bf9a92018-04-30 14:51:01 +02002020 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021
2022 current->last_siginfo = info;
2023 current->exit_code = exit_code;
2024
Tejun Heod79fdd62011-03-23 10:37:00 +01002025 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 * If @why is CLD_STOPPED, we're trapping to participate in a group
2027 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
Tejun Heo73ddff22011-06-14 11:20:14 +02002028 * across siglock relocks since INTERRUPT was scheduled, PENDING
2029 * could be clear now. We act as if SIGCONT is received after
2030 * TASK_TRACED is entered - ignore it.
Tejun Heod79fdd62011-03-23 10:37:00 +01002031 */
Tejun Heoa8f072c2011-06-02 11:13:59 +02002032 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 gstop_done = task_participate_group_stop(current);
Tejun Heod79fdd62011-03-23 10:37:00 +01002034
Tejun Heofb1d9102011-06-14 11:20:17 +02002035 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
Tejun Heo73ddff22011-06-14 11:20:14 +02002036 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
Tejun Heofb1d9102011-06-14 11:20:17 +02002037 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2038 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
Tejun Heo73ddff22011-06-14 11:20:14 +02002039
Tejun Heo81be24b2011-06-02 11:13:59 +02002040 /* entering a trap, clear TRAPPING */
Tejun Heoa8f072c2011-06-02 11:13:59 +02002041 task_clear_jobctl_trapping(current);
Tejun Heod79fdd62011-03-23 10:37:00 +01002042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 spin_unlock_irq(&current->sighand->siglock);
2044 read_lock(&tasklist_lock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07002045 if (may_ptrace_stop()) {
Tejun Heoceb6bd62011-03-23 10:37:01 +01002046 /*
2047 * Notify parents of the stop.
2048 *
2049 * While ptraced, there are two parents - the ptracer and
2050 * the real_parent of the group_leader. The ptracer should
2051 * know about every stop while the real parent is only
2052 * interested in the completion of group stop. The states
2053 * for the two don't interact with each other. Notify
2054 * separately unless they're gonna be duplicates.
2055 */
2056 do_notify_parent_cldstop(current, true, why);
Oleg Nesterovbb3696d2011-06-24 17:34:23 +02002057 if (gstop_done && ptrace_reparented(current))
Tejun Heoceb6bd62011-03-23 10:37:01 +01002058 do_notify_parent_cldstop(current, false, why);
2059
Miklos Szeredi53da1d92009-03-23 16:07:24 +01002060 /*
2061 * Don't want to allow preemption here, because
2062 * sys_ptrace() needs this task to be inactive.
2063 *
2064 * XXX: implement read_unlock_no_resched().
2065 */
2066 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 read_unlock(&tasklist_lock);
Miklos Szeredi53da1d92009-03-23 16:07:24 +01002068 preempt_enable_no_resched();
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002069 freezable_schedule();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 } else {
2071 /*
2072 * By the time we got the lock, our tracer went away.
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002073 * Don't drop the lock yet, another tracer may come.
Tejun Heoceb6bd62011-03-23 10:37:01 +01002074 *
2075 * If @gstop_done, the ptracer went away between group stop
2076 * completion and here. During detach, it would have set
Tejun Heoa8f072c2011-06-02 11:13:59 +02002077 * JOBCTL_STOP_PENDING on us and we'll re-enter
2078 * TASK_STOPPED in do_signal_stop() on return, so notifying
2079 * the real parent of the group stop completion is enough.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 */
Tejun Heoceb6bd62011-03-23 10:37:01 +01002081 if (gstop_done)
2082 do_notify_parent_cldstop(current, false, why);
2083
Oleg Nesterov9899d112013-01-21 20:48:00 +01002084 /* tasklist protects us from ptrace_freeze_traced() */
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002085 __set_current_state(TASK_RUNNING);
Oleg Nesterov20686a32008-02-08 04:19:03 -08002086 if (clear_code)
2087 current->exit_code = 0;
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08002088 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
2090
2091 /*
2092 * We are back. Now reacquire the siglock before touching
2093 * last_siginfo, so that we are sure to have synchronized with
2094 * any signal-sending on another CPU that wants to examine it.
2095 */
2096 spin_lock_irq(&current->sighand->siglock);
2097 current->last_siginfo = NULL;
2098
Tejun Heo544b2c92011-06-14 11:20:18 +02002099 /* LISTENING can be set only during STOP traps, clear it */
2100 current->jobctl &= ~JOBCTL_LISTENING;
2101
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 /*
2103 * Queued signals ignored us while we were stopped for tracing.
2104 * So check for any that we should take before resuming user mode.
Roland McGrathb74d0de2007-06-06 03:59:00 -07002105 * This sets TIF_SIGPENDING, but never clears it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 */
Roland McGrathb74d0de2007-06-06 03:59:00 -07002107 recalc_sigpending_tsk(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
2109
/*
 * Build the siginfo describing a ptrace trap and enter ptrace_stop().
 * @signr:     signal number reported in si_signo
 * @exit_code: stored in si_code and reported to the tracer's wait(2)
 * @why:       CLD_* code passed through to ptrace_stop()
 *
 * Must be called with current->sighand->siglock held; ptrace_stop()
 * may drop and re-acquire it while we are stopped.
 */
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	/* Zero first so padding/unused union members don't leak stack data. */
	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
2123
/*
 * ptrace_notify - report a ptrace event to the tracer
 * @exit_code: event code; the low seven bits must be SIGTRAP
 *
 * Flushes pending task_work before taking siglock, since we may sleep
 * in ptrace_stop() until the tracer resumes us.
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2134
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		/*
		 * Bail if the stop signal was already consumed (e.g. by an
		 * intervening SIGCONT) or the whole group is exiting.
		 */
		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		/* Count ourselves in first ... */
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		/* ... then schedule the stop for every other live thread. */
		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002258
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop in progress: this is a pure INTERRUPT
		 * trap, reported as SIGTRAP instead of a stop signal.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2292
/*
 * A signal was dequeued while we are ptraced: trap to the tracer and let
 * it inspect, change or cancel the signal.
 *
 * Returns the (possibly tracer-modified) signal number to deliver, or 0
 * if the tracer cancelled it or the new signal is blocked and has been
 * requeued.  Called with siglock held; ptrace_stop() drops and retakes it.
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		/* Synthesize an SI_USER siginfo as if the tracer sent it. */
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2340
/**
 * get_signal - dequeue and triage the next signal for the current task
 * @ksig: filled with the signal, its siginfo and sigaction on %true return
 *
 * Handles group-stop participation, jobctl/ptrace traps and the default
 * dispositions (ignore, stop, coredump, group exit) internally.  Only a
 * signal with a user-installed handler is handed back to the caller.
 *
 * RETURNS:
 * %true if @ksig describes a signal whose handler must be set up,
 * %false if there is nothing left to deliver.
 */
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		/* Participate in a pending group stop before dequeueing. */
		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* Give the tracer a chance to change or cancel the signal. */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2526
Matt Fleming5e6292c2012-01-10 15:11:17 -08002527/**
Al Viroefee9842012-04-28 02:04:15 -04002528 * signal_delivered -
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002529 * @ksig: kernel signal struct
Al Viroefee9842012-04-28 02:04:15 -04002530 * @stepping: nonzero if debugger single-step or block-step in use
Matt Fleming5e6292c2012-01-10 15:11:17 -08002531 *
Masanari Iidae2278672014-02-18 22:54:36 +09002532 * This function should be called when a signal has successfully been
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002533 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
Al Viroefee9842012-04-28 02:04:15 -04002534 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002535 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
Matt Fleming5e6292c2012-01-10 15:11:17 -08002536 */
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002537static void signal_delivered(struct ksignal *ksig, int stepping)
Matt Fleming5e6292c2012-01-10 15:11:17 -08002538{
2539 sigset_t blocked;
2540
Al Viroa610d6e2012-05-21 23:42:15 -04002541 /* A signal was successfully delivered, and the
2542 saved sigmask was stored on the signal frame,
2543 and will be restored by sigreturn. So we can
2544 simply clear the restore sigmask flag. */
2545 clear_restore_sigmask();
2546
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002547 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2548 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2549 sigaddset(&blocked, ksig->sig);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002550 set_current_blocked(&blocked);
Richard Weinbergerdf5601f2013-10-07 15:37:19 +02002551 tracehook_signal_handler(stepping);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002552}
2553
Al Viro2ce5da12012-11-07 15:11:25 -05002554void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2555{
2556 if (failed)
2557 force_sigsegv(ksig->sig, current);
2558 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002559 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002560}
2561
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002562/*
2563 * It could be that complete_signal() picked us to notify about the
Oleg Nesterovfec99932011-04-27 19:50:21 +02002564 * group-wide signal. Other threads should be notified now to take
2565 * the shared signals in @which since we will not.
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002566 */
Oleg Nesterovf646e222011-04-27 19:18:39 +02002567static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002568{
Oleg Nesterovf646e222011-04-27 19:18:39 +02002569 sigset_t retarget;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002570 struct task_struct *t;
2571
Oleg Nesterovf646e222011-04-27 19:18:39 +02002572 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2573 if (sigisemptyset(&retarget))
2574 return;
2575
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002576 t = tsk;
2577 while_each_thread(tsk, t) {
Oleg Nesterovfec99932011-04-27 19:50:21 +02002578 if (t->flags & PF_EXITING)
2579 continue;
2580
2581 if (!has_pending_signals(&retarget, &t->blocked))
2582 continue;
2583 /* Remove the signals this thread can handle. */
2584 sigandsets(&retarget, &retarget, &t->blocked);
2585
2586 if (!signal_pending(t))
2587 signal_wake_up(t, 0);
2588
2589 if (sigisemptyset(&retarget))
2590 break;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002591 }
2592}
2593
/*
 * Called on the exit path: set PF_EXITING and hand any pending shared
 * signals that only this task could have taken over to sibling threads.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	/* Single-threaded or whole-group exit: no signals to retarget. */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* Retarget shared-pending signals we were not blocking. */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2643
/* Signal primitives exported for use by modules and architecture code. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651
2652/*
2653 * System call entry points.
2654 */
2655
Randy Dunlap41c57892011-04-04 15:00:26 -07002656/**
2657 * sys_restart_syscall - restart a system call
2658 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002659SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002661 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 return restart->fn(restart);
2663}
2664
/*
 * restart_block->fn stub for syscalls that must not be restarted:
 * simply report -EINTR to the caller.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2669
/*
 * Install @newset as @tsk's blocked mask.  Callers in this file hold
 * tsk->sighand->siglock (see __set_current_blocked() and
 * do_sigtimedwait()).  Shared-pending signals that become blocked here
 * are retargeted to another thread that can still take them.
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2681
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002682/**
2683 * set_current_blocked - change current->blocked mask
2684 * @newset: new mask
2685 *
2686 * It is wrong to change ->blocked directly, this helper should be used
2687 * to ensure the process can't miss a shared signal we are going to block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 */
void set_current_blocked(sigset_t *newset)
{
	/* SIGKILL and SIGSTOP can never be blocked; strip them first. */
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2694
/*
 * Like set_current_blocked() but does not filter SIGKILL/SIGSTOP;
 * takes siglock around the actual mask update.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
2711/*
2712 * This is also useful for kernel threads that want to temporarily
2713 * (or permanently) block certain signals.
2714 *
2715 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2716 * interface happily blocks "unblockable" signals like SIGKILL
2717 * and friends.
2718 */
2719int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2720{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002721 struct task_struct *tsk = current;
2722 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002724 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002725 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002726 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002727
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 switch (how) {
2729 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002730 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 break;
2732 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002733 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 break;
2735 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002736 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 break;
2738 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002739 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002741
Al Viro77097ae2012-04-27 13:58:59 -04002742 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002743 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744}
2745
Randy Dunlap41c57892011-04-04 15:00:26 -07002746/**
2747 * sys_rt_sigprocmask - change the list of currently blocked signals
2748 * @how: whether to add, remove, or set signals
 * @nset: new signal mask to apply according to @how, or NULL to only read the old mask
Randy Dunlap41c57892011-04-04 15:00:26 -07002750 * @oset: previous value of signal mask if non-null
2751 * @sigsetsize: size of sigset_t type
2752 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002753SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002754 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002757 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
2759 /* XXX: Don't preclude handling different sized sigset_t's. */
2760 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002761 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002763 old_set = current->blocked;
2764
2765 if (nset) {
2766 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2767 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2769
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002770 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002772 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02002774
2775 if (oset) {
2776 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2777 return -EFAULT;
2778 }
2779
2780 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781}
2782
Al Viro322a56c2012-12-25 13:32:58 -05002783#ifdef CONFIG_COMPAT
/* Compat entry point: converts the user's compat_sigset_t before and
 * after calling the native sigprocmask() logic. */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		/* SIGKILL/SIGSTOP cannot be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
2806#endif
Al Viro322a56c2012-12-25 13:32:58 -05002807
/*
 * Compute the set of signals pending for the caller (private plus
 * shared) that are also currently blocked, into *@set.
 */
static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
Randy Dunlap41c57892011-04-04 15:00:26 -07002819/**
2820 * sys_rt_sigpending - examine a pending signal that has been raised
2821 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08002822 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07002823 * @sigsetsize: size of sigset_t type or larger
2824 */
Al Virofe9c1db2012-12-25 14:31:38 -05002825SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826{
Al Virofe9c1db2012-12-25 14:31:38 -05002827 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03002828
2829 if (sigsetsize > sizeof(*uset))
2830 return -EINVAL;
2831
Christian Braunerb1d294c2018-08-21 22:00:02 -07002832 do_sigpending(&set);
2833
2834 if (copy_to_user(uset, &set, sigsetsize))
2835 return -EFAULT;
2836
2837 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838}
2839
Al Virofe9c1db2012-12-25 14:31:38 -05002840#ifdef CONFIG_COMPAT
/* Compat entry point: same as sys_rt_sigpending() but converts the
 * result to a compat_sigset_t on the way out. */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
2853#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002854
/*
 * Classify which siginfo union member is valid for (@sig, @si_code),
 * so copy routines know which fields to transfer.
 */
enum siginfo_layout siginfo_layout(int sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		/* Kernel-generated code: look up the layout by signal
		 * number, as long as si_code is within that signal's
		 * documented range (the "limit"). */
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL] = { NSIGILL, SIL_FAULT },
			[SIGFPE] = { NSIGFPE, SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS] = { NSIGBUS, SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT] = { NSIGEMT, SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
			[SIGSYS] = { NSIGSYS, SIL_SYS },
		};
		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
			layout = filter[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
		}
		/* Signal not in the table (e.g. realtime): SIGPOLL-range
		 * codes still use the poll layout. */
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		/* User-space or special origin codes. */
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
2899
Al Viroce395962013-10-13 17:23:53 -04002900int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901{
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002902 if (copy_to_user(to, from , sizeof(struct siginfo)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903 return -EFAULT;
Eric W. Biedermanc999b932018-04-14 13:03:25 -05002904 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905}
2906
Eric W. Biederman212a36a2017-07-31 17:15:31 -05002907#ifdef CONFIG_COMPAT
/*
 * Convert a native siginfo to the compat layout and copy it to
 * userspace, transferring only the fields valid for its layout.
 *
 * NOTE: on x86 (X32/IA32 emulation) the #if below makes
 * copy_siginfo_to_user32() a thin wrapper that forwards the in-x32
 * flag to __copy_siginfo_to_user32(); elsewhere the two names share
 * one body and x32_ABI does not exist.  Keep the preprocessor
 * structure exactly as-is.
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct siginfo *from)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
{
	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct siginfo *from, bool x32_ABI)
#endif
{
	struct compat_siginfo new;
	/* Zero everything so no kernel stack bytes leak to userspace. */
	memset(&new, 0, sizeof(new));

	new.si_signo = from->si_signo;
	new.si_errno = from->si_errno;
	new.si_code  = from->si_code;
	/* Copy only the union members the layout says are valid. */
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		new.si_tid     = from->si_tid;
		new.si_overrun = from->si_overrun;
		new.si_int     = from->si_int;
		break;
	case SIL_POLL:
		new.si_band = from->si_band;
		new.si_fd   = from->si_fd;
		break;
	case SIL_FAULT:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_lower = ptr_to_compat(from->si_lower);
		new.si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		new.si_pid    = from->si_pid;
		new.si_uid    = from->si_uid;
		new.si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		/* X32 uses 64-bit utime/stime fields in the compat struct. */
		if (x32_ABI) {
			new._sifields._sigchld_x32._utime = from->si_utime;
			new._sifields._sigchld_x32._stime = from->si_stime;
		} else
#endif
		{
			new.si_utime = from->si_utime;
			new.si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_int = from->si_int;
		break;
	case SIL_SYS:
		new.si_call_addr = ptr_to_compat(from->si_call_addr);
		new.si_syscall   = from->si_syscall;
		new.si_arch      = from->si_arch;
		break;
	}

	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return 0;
}
2998
/*
 * Copy a compat_siginfo from userspace and expand it into a native
 * siginfo, transferring only the fields valid for its layout (the
 * inverse of copy_siginfo_to_user32()).
 */
int copy_siginfo_from_user32(struct siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	/* Start from a clean slate so unused union bytes are zero. */
	clear_siginfo(to);
	to->si_signo = from.si_signo;
	to->si_errno = from.si_errno;
	to->si_code  = from.si_code;
	switch(siginfo_layout(from.si_signo, from.si_code)) {
	case SIL_KILL:
		to->si_pid = from.si_pid;
		to->si_uid = from.si_uid;
		break;
	case SIL_TIMER:
		to->si_tid     = from.si_tid;
		to->si_overrun = from.si_overrun;
		to->si_int     = from.si_int;
		break;
	case SIL_POLL:
		to->si_band = from.si_band;
		to->si_fd   = from.si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_addr_lsb = from.si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_lower = compat_ptr(from.si_lower);
		to->si_upper = compat_ptr(from.si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from.si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from.si_trapno;
#endif
		to->si_pkey = from.si_pkey;
		break;
	case SIL_CHLD:
		to->si_pid    = from.si_pid;
		to->si_uid    = from.si_uid;
		to->si_status = from.si_status;
#ifdef CONFIG_X86_X32_ABI
		/* X32 callers carry 64-bit utime/stime in the compat struct. */
		if (in_x32_syscall()) {
			to->si_utime = from._sifields._sigchld_x32._utime;
			to->si_stime = from._sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from.si_utime;
			to->si_stime = from.si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from.si_pid;
		to->si_uid = from.si_uid;
		to->si_int = from.si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from.si_call_addr);
		to->si_syscall   = from.si_syscall;
		to->si_arch      = from.si_arch;
		break;
	}
	return 0;
}
3081#endif /* CONFIG_COMPAT */
3082
Randy Dunlap41c57892011-04-04 15:00:26 -07003083/**
Oleg Nesterov943df142011-04-27 21:44:14 +02003084 * do_sigtimedwait - wait for queued signals specified in @which
3085 * @which: queued signals to wait for
3086 * @info: if non-null, the signal's siginfo is returned here
3087 * @ts: upper bound on process time suspension
3088 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	/* NULL ts means wait forever (timeout stays KTIME_MAX, to stays NULL). */
	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	/* Fast path: one of the wanted signals is already queued. */
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/* Restore the original mask and retry the dequeue under the lock. */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	/* ret != 0: the sleep was interrupted; 0 means the timeout expired. */
	return ret ? -EINTR : -EAGAIN;
}
3138
3139/**
Randy Dunlap41c57892011-04-04 15:00:26 -07003140 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3141 * in @uthese
3142 * @uthese: queued signals to wait for
3143 * @uinfo: if non-null, the signal's siginfo is returned here
3144 * @uts: upper bound on process time suspension
3145 * @sigsetsize: size of sigset_t type
3146 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003147SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3148 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3149 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 sigset_t these;
3152 struct timespec ts;
3153 siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02003154 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155
3156 /* XXX: Don't preclude handling different sized sigset_t's. */
3157 if (sigsetsize != sizeof(sigset_t))
3158 return -EINVAL;
3159
3160 if (copy_from_user(&these, uthese, sizeof(these)))
3161 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07003162
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 if (uts) {
3164 if (copy_from_user(&ts, uts, sizeof(ts)))
3165 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 }
3167
Oleg Nesterov943df142011-04-27 21:44:14 +02003168 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169
Oleg Nesterov943df142011-04-27 21:44:14 +02003170 if (ret > 0 && uinfo) {
3171 if (copy_siginfo_to_user(uinfo, &info))
3172 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 }
3174
3175 return ret;
3176}
3177
Al Viro1b3c8722017-05-31 04:46:17 -04003178#ifdef CONFIG_COMPAT
/* Compat entry point: converts the compat sigset/timespec/siginfo and
 * defers to do_sigtimedwait(). */
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	/* NULL uts means no timeout. */
	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
3208#endif
3209
Randy Dunlap41c57892011-04-04 15:00:26 -07003210/**
3211 * sys_kill - send a signal to a process
3212 * @pid: the PID of the process
3213 * @sig: signal to be sent
3214 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	/* Build an SI_USER siginfo identifying the sender's tgid/uid
	 * as seen in the sender's namespaces. */
	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* pid semantics (process / group / all) are decided by the callee. */
	return kill_something_info(sig, &info, pid);
}
3228
/*
 * Send @sig to the single task @pid, optionally checking that it
 * belongs to thread group @tgid (tgid <= 0 skips the check).
 * Returns 0 on success, -ESRCH if no such task, or a permission error.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU protects the pid -> task lookup. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
3258
/* Common backend for tkill()/tgkill(): builds an SI_TKILL siginfo and
 * targets the specific thread @pid (checked against @tgid if > 0). */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
3272
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273/**
3274 * sys_tgkill - send signal to one specific thread
3275 * @tgid: the thread group ID of the thread
3276 * @pid: the PID of the thread
3277 * @sig: signal to be sent
3278 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003279 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 * exists but it's not belonging to the target process anymore. This
3281 * method solves the problem of threads exiting and PIDs getting reused.
3282 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
3291
Randy Dunlap41c57892011-04-04 15:00:26 -07003292/**
3293 * sys_tkill - send signal to one specific task
3294 * @pid: the PID of the task
3295 * @sig: signal to be sent
3296 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3298 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	/* tgid 0 tells do_tkill() to skip the thread-group check. */
	return do_tkill(0, pid, sig);
}
3307
/*
 * Backend for rt_sigqueueinfo(): validate the caller-supplied siginfo
 * and deliver it to process @pid.
 */
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 * Kernel-origin si_codes are only allowed when signalling yourself.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* si_signo must match the signal actually being sent. */
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
3322
Randy Dunlap41c57892011-04-04 15:00:26 -07003323/**
3324 * sys_rt_sigqueueinfo - send signal information to a signal
3325 * @pid: the PID of the thread
3326 * @sig: signal to be sent
3327 * @uinfo: signal info to be sent
3328 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	/* Fetch the caller-provided siginfo, then let the backend
	 * validate and deliver it. */
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3337
Al Viro75907d42012-12-25 15:19:12 -05003338#ifdef CONFIG_COMPAT
/* Compat entry point: expands the compat_siginfo before delivery. */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3350#endif
Al Viro75907d42012-12-25 15:19:12 -05003351
/*
 * Backend for rt_tgsigqueueinfo(): like do_rt_sigqueueinfo() but
 * targets one specific thread @pid inside thread group @tgid.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* si_signo must match the signal actually being sent. */
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3369
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	/* Fetch the caller-provided siginfo, then let the backend
	 * validate and deliver it to the specific thread. */
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3380
Al Viro9aae8fc2012-12-24 23:12:04 -05003381#ifdef CONFIG_COMPAT
/* Compat entry point: expands the compat_siginfo before delivery. */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3394#endif
3395
Oleg Nesterov03417292014-06-06 14:36:53 -07003396/*
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003397 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
Oleg Nesterov03417292014-06-06 14:36:53 -07003398 */
/*
 * Install @action as the handler for @sig in the current (kthread's)
 * sighand.  When ignoring, any already-queued instances of @sig are
 * flushed from both the shared and the private pending queues.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003415EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003416
/*
 * Arch hook called from do_sigaction() with the sighand lock held.
 * Default is a no-op; being __weak, an architecture may override it to
 * adjust the new/old k_sigaction for its compat ABI.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3421
/*
 * do_sigaction - examine and/or change the handler for a signal.
 * @sig:  signal number
 * @act:  new action to install, or NULL to only query
 * @oact: if non-NULL, receives the previously installed action
 *
 * Core of sigaction()/rt_sigaction().  Runs under the sighand lock so
 * the update is atomic with respect to delivery in all threads sharing
 * the handler table.  Returns 0 or -EINVAL.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* Changing the action of SIGKILL/SIGSTOP is never allowed. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Let compat-capable arches fix up the new/old action (weak hook). */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be masked by a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Drop pending instances: shared queue + every thread. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3466
Oleg Nesterovc09c1442014-06-06 14:36:50 -07003467static int
Al Virobcfe8ad2017-05-27 00:29:34 -04003468do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469{
Al Virobcfe8ad2017-05-27 00:29:34 -04003470 struct task_struct *t = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
Al Virobcfe8ad2017-05-27 00:29:34 -04003472 if (oss) {
3473 memset(oss, 0, sizeof(stack_t));
3474 oss->ss_sp = (void __user *) t->sas_ss_sp;
3475 oss->ss_size = t->sas_ss_size;
3476 oss->ss_flags = sas_ss_flags(sp) |
3477 (current->sas_ss_flags & SS_FLAG_BITS);
3478 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479
Al Virobcfe8ad2017-05-27 00:29:34 -04003480 if (ss) {
3481 void __user *ss_sp = ss->ss_sp;
3482 size_t ss_size = ss->ss_size;
3483 unsigned ss_flags = ss->ss_flags;
Stas Sergeev407bc162016-04-14 23:20:03 +03003484 int ss_mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
Al Virobcfe8ad2017-05-27 00:29:34 -04003486 if (unlikely(on_sig_stack(sp)))
3487 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
Stas Sergeev407bc162016-04-14 23:20:03 +03003489 ss_mode = ss_flags & ~SS_FLAG_BITS;
Al Virobcfe8ad2017-05-27 00:29:34 -04003490 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3491 ss_mode != 0))
3492 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493
Stas Sergeev407bc162016-04-14 23:20:03 +03003494 if (ss_mode == SS_DISABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 ss_size = 0;
3496 ss_sp = NULL;
3497 } else {
Al Virobcfe8ad2017-05-27 00:29:34 -04003498 if (unlikely(ss_size < MINSIGSTKSZ))
3499 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 }
3501
Al Virobcfe8ad2017-05-27 00:29:34 -04003502 t->sas_ss_sp = (unsigned long) ss_sp;
3503 t->sas_ss_size = ss_size;
3504 t->sas_ss_flags = ss_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 }
Al Virobcfe8ad2017-05-27 00:29:34 -04003506 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507}
Al Virobcfe8ad2017-05-27 00:29:34 -04003508
Al Viro6bf9adf2012-12-14 14:09:47 -05003509SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3510{
Al Virobcfe8ad2017-05-27 00:29:34 -04003511 stack_t new, old;
3512 int err;
3513 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3514 return -EFAULT;
3515 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3516 current_user_stack_pointer());
3517 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3518 err = -EFAULT;
3519 return err;
Al Viro6bf9adf2012-12-14 14:09:47 -05003520}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
Al Viro5c495742012-11-18 15:29:16 -05003522int restore_altstack(const stack_t __user *uss)
3523{
Al Virobcfe8ad2017-05-27 00:29:34 -04003524 stack_t new;
3525 if (copy_from_user(&new, uss, sizeof(stack_t)))
3526 return -EFAULT;
3527 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
Al Viro5c495742012-11-18 15:29:16 -05003528 /* squash all but EFAULT for now */
Al Virobcfe8ad2017-05-27 00:29:34 -04003529 return 0;
Al Viro5c495742012-11-18 15:29:16 -05003530}
3531
Al Viroc40702c2012-11-20 14:24:26 -05003532int __save_altstack(stack_t __user *uss, unsigned long sp)
3533{
3534 struct task_struct *t = current;
Stas Sergeev2a742132016-04-14 23:20:04 +03003535 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3536 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003537 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev2a742132016-04-14 23:20:04 +03003538 if (err)
3539 return err;
3540 if (t->sas_ss_flags & SS_AUTODISARM)
3541 sas_ss_reset(t);
3542 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003543}
3544
Al Viro90268432012-12-14 14:47:53 -05003545#ifdef CONFIG_COMPAT
Dominik Brodowski6203deb2018-03-17 17:11:51 +01003546static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3547 compat_stack_t __user *uoss_ptr)
Al Viro90268432012-12-14 14:47:53 -05003548{
3549 stack_t uss, uoss;
3550 int ret;
Al Viro90268432012-12-14 14:47:53 -05003551
3552 if (uss_ptr) {
3553 compat_stack_t uss32;
Al Viro90268432012-12-14 14:47:53 -05003554 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3555 return -EFAULT;
3556 uss.ss_sp = compat_ptr(uss32.ss_sp);
3557 uss.ss_flags = uss32.ss_flags;
3558 uss.ss_size = uss32.ss_size;
3559 }
Al Virobcfe8ad2017-05-27 00:29:34 -04003560 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
Al Viro90268432012-12-14 14:47:53 -05003561 compat_user_stack_pointer());
Al Viro90268432012-12-14 14:47:53 -05003562 if (ret >= 0 && uoss_ptr) {
Al Virobcfe8ad2017-05-27 00:29:34 -04003563 compat_stack_t old;
3564 memset(&old, 0, sizeof(old));
3565 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3566 old.ss_flags = uoss.ss_flags;
3567 old.ss_size = uoss.ss_size;
3568 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
Al Viro90268432012-12-14 14:47:53 -05003569 ret = -EFAULT;
3570 }
3571 return ret;
3572}
3573
/* Compat sigaltstack(2): thin wrapper around do_compat_sigaltstack(). */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}
3580
Al Viro90268432012-12-14 14:47:53 -05003581int compat_restore_altstack(const compat_stack_t __user *uss)
3582{
Dominik Brodowski6203deb2018-03-17 17:11:51 +01003583 int err = do_compat_sigaltstack(uss, NULL);
Al Viro90268432012-12-14 14:47:53 -05003584 /* squash all but -EFAULT for now */
3585 return err == -EFAULT ? err : 0;
3586}
Al Viroc40702c2012-11-20 14:24:26 -05003587
3588int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3589{
Stas Sergeev441398d2017-02-27 14:27:25 -08003590 int err;
Al Viroc40702c2012-11-20 14:24:26 -05003591 struct task_struct *t = current;
Stas Sergeev441398d2017-02-27 14:27:25 -08003592 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3593 &uss->ss_sp) |
3594 __put_user(t->sas_ss_flags, &uss->ss_flags) |
Al Viroc40702c2012-11-20 14:24:26 -05003595 __put_user(t->sas_ss_size, &uss->ss_size);
Stas Sergeev441398d2017-02-27 14:27:25 -08003596 if (err)
3597 return err;
3598 if (t->sas_ss_flags & SS_AUTODISARM)
3599 sas_ss_reset(t);
3600 return 0;
Al Viroc40702c2012-11-20 14:24:26 -05003601}
Al Viro90268432012-12-14 14:47:53 -05003602#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
3604#ifdef __ARCH_WANT_SYS_SIGPENDING
3605
Randy Dunlap41c57892011-04-04 15:00:26 -07003606/**
3607 * sys_sigpending - examine pending signals
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003608 * @uset: where mask of pending signal is returned
Randy Dunlap41c57892011-04-04 15:00:26 -07003609 */
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003610SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611{
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003612 sigset_t set;
Dominik Brodowskid53238c2018-03-11 11:34:37 +01003613
3614 if (sizeof(old_sigset_t) > sizeof(*uset))
3615 return -EINVAL;
3616
Christian Braunerb1d294c2018-08-21 22:00:02 -07003617 do_sigpending(&set);
3618
3619 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3620 return -EFAULT;
3621
3622 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623}
3624
Al Viro8f136212017-05-31 04:42:07 -04003625#ifdef CONFIG_COMPAT
3626COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3627{
3628 sigset_t set;
Christian Braunerb1d294c2018-08-21 22:00:02 -07003629
3630 do_sigpending(&set);
3631
3632 return put_user(set.sig[0], set32);
Al Viro8f136212017-05-31 04:42:07 -04003633}
3634#endif
3635
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636#endif
3637
3638#ifdef __ARCH_WANT_SYS_SIGPROCMASK
Randy Dunlap41c57892011-04-04 15:00:26 -07003639/**
3640 * sys_sigprocmask - examine and change blocked signals
3641 * @how: whether to add, remove, or set signals
Oleg Nesterovb013c392011-04-28 11:36:20 +02003642 * @nset: signals to add or remove (if non-null)
Randy Dunlap41c57892011-04-04 15:00:26 -07003643 * @oset: previous value of signal mask if non-null
3644 *
Randy Dunlap5aba0852011-04-04 14:59:31 -07003645 * Some platforms have their own version with special arguments;
3646 * others support only sys_rt_sigprocmask.
3647 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648
Oleg Nesterovb013c392011-04-28 11:36:20 +02003649SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
Heiko Carstensb290ebe2009-01-14 14:14:06 +01003650 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 old_sigset_t old_set, new_set;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003653 sigset_t new_blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654
Oleg Nesterovb013c392011-04-28 11:36:20 +02003655 old_set = current->blocked.sig[0];
3656
3657 if (nset) {
3658 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3659 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003661 new_blocked = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 switch (how) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 case SIG_BLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003665 sigaddsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 break;
3667 case SIG_UNBLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003668 sigdelsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669 break;
3670 case SIG_SETMASK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003671 new_blocked.sig[0] = new_set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 break;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003673 default:
3674 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 }
3676
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01003677 set_current_blocked(&new_blocked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678 }
Oleg Nesterovb013c392011-04-28 11:36:20 +02003679
3680 if (oset) {
3681 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3682 return -EFAULT;
3683 }
3684
3685 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686}
3687#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3688
Al Viroeaca6ea2012-11-25 23:12:10 -05003689#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003690/**
3691 * sys_rt_sigaction - alter an action taken by a process
3692 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003693 * @act: new sigaction
3694 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003695 * @sigsetsize: size of sigset_t type
3696 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003697SYSCALL_DEFINE4(rt_sigaction, int, sig,
3698 const struct sigaction __user *, act,
3699 struct sigaction __user *, oact,
3700 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701{
3702 struct k_sigaction new_sa, old_sa;
Christian Braunerd8f993b2018-08-21 22:00:07 -07003703 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
3705 /* XXX: Don't preclude handling different sized sigset_t's. */
3706 if (sigsetsize != sizeof(sigset_t))
Christian Braunerd8f993b2018-08-21 22:00:07 -07003707 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708
Christian Braunerd8f993b2018-08-21 22:00:07 -07003709 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3710 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711
3712 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
Christian Braunerd8f993b2018-08-21 22:00:07 -07003713 if (ret)
3714 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715
Christian Braunerd8f993b2018-08-21 22:00:07 -07003716 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3717 return -EFAULT;
3718
3719 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720}
Al Viro08d32fe2012-12-25 18:38:15 -05003721#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigaction(2): translate the 32-bit sigaction layout
 * (handler/restorer pointers, sigset) to a native k_sigaction, call
 * do_sigaction(), and translate the old action back on success.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* Errors are OR-ed together; any nonzero result means -EFAULT. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Report the old action back in compat form. */
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3765#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003766#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767
Al Viro495dfbf2012-12-25 19:09:45 -05003768#ifdef CONFIG_OLD_SIGACTION
/*
 * Old-style sigaction(2) (struct old_sigaction ABI): unpack the user
 * struct field by field, call do_sigaction(), and pack the old action
 * back.  Only the first word of the blocked mask is representable.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* One access_ok() covers the whole struct; __get_user after. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3803#endif
3804#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat old-style sigaction(2): like sys_sigaction() but handler and
 * restorer are 32-bit user pointers that must be widened with
 * compat_ptr(), and the mask is a compat_old_sigset_t.
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
	        struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok() covers the struct; __get_user for fields. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3844#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845
Fabian Frederickf6187762014-06-04 16:11:12 -07003846#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847
/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	/* Only the first word of the blocked set fits in the old ABI. */
	return current->blocked.sig[0];
}
3856
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003857SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003859 int old = current->blocked.sig[0];
3860 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003862 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003863 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864
3865 return old;
3866}
Fabian Frederickf6187762014-06-04 16:11:12 -07003867#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868
3869#ifdef __ARCH_WANT_SYS_SIGNAL
3870/*
3871 * For backwards compatibility. Functionality superseded by sigaction.
3872 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003873SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874{
3875 struct k_sigaction new_sa, old_sa;
3876 int ret;
3877
3878 new_sa.sa.sa_handler = handler;
3879 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003880 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881
3882 ret = do_sigaction(sig, &new_sa, &old_sa);
3883
3884 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3885}
3886#endif /* __ARCH_WANT_SYS_SIGNAL */
3887
3888#ifdef __ARCH_WANT_SYS_PAUSE
3889
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003890SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003892 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003893 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003894 schedule();
3895 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 return -ERESTARTNOHAND;
3897}
3898
3899#endif
3900
Richard Weinberger9d8a7652015-11-20 15:57:21 -08003901static int sigsuspend(sigset_t *set)
Al Viro68f3f162012-05-21 21:42:32 -04003902{
Al Viro68f3f162012-05-21 21:42:32 -04003903 current->saved_sigmask = current->blocked;
3904 set_current_blocked(set);
3905
Sasha Levin823dd322016-02-05 15:36:05 -08003906 while (!signal_pending(current)) {
3907 __set_current_state(TASK_INTERRUPTIBLE);
3908 schedule();
3909 }
Al Viro68f3f162012-05-21 21:42:32 -04003910 set_restore_sigmask();
3911 return -ERESTARTNOHAND;
3912}
Al Viro68f3f162012-05-21 21:42:32 -04003913
Randy Dunlap41c57892011-04-04 15:00:26 -07003914/**
3915 * sys_rt_sigsuspend - replace the signal mask for a value with the
3916 * @unewset value until a signal is received
3917 * @unewset: new signal mask value
3918 * @sigsetsize: size of sigset_t type
3919 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003920SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003921{
3922 sigset_t newset;
3923
3924 /* XXX: Don't preclude handling different sized sigset_t's. */
3925 if (sigsetsize != sizeof(sigset_t))
3926 return -EINVAL;
3927
3928 if (copy_from_user(&newset, unewset, sizeof(newset)))
3929 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003930 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003931}
Al Viroad4b65a2012-12-24 21:43:56 -05003932
3933#ifdef CONFIG_COMPAT
3934COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3935{
Al Viroad4b65a2012-12-24 21:43:56 -05003936 sigset_t newset;
Al Viroad4b65a2012-12-24 21:43:56 -05003937
3938 /* XXX: Don't preclude handling different sized sigset_t's. */
3939 if (sigsetsize != sizeof(sigset_t))
3940 return -EINVAL;
3941
Al Viro3968cf62017-09-03 21:45:17 -04003942 if (get_compat_sigset(&newset, unewset))
Al Viroad4b65a2012-12-24 21:43:56 -05003943 return -EFAULT;
Al Viroad4b65a2012-12-24 21:43:56 -05003944 return sigsuspend(&newset);
Al Viroad4b65a2012-12-24 21:43:56 -05003945}
3946#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003947
Al Viro0a0e8cd2012-12-25 16:04:12 -05003948#ifdef CONFIG_OLD_SIGSUSPEND
3949SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3950{
3951 sigset_t blocked;
3952 siginitset(&blocked, mask);
3953 return sigsuspend(&blocked);
3954}
3955#endif
3956#ifdef CONFIG_OLD_SIGSUSPEND3
3957SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3958{
3959 sigset_t blocked;
3960 siginitset(&blocked, mask);
3961 return sigsuspend(&blocked);
3962}
3963#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964
/*
 * Default arch_vma_name(): no special name for any VMA.  Being __weak,
 * architectures may override this to label special mappings.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3969
/*
 * Boot-time signal setup: sanity-check the siginfo layout at compile
 * time and create the sigqueue slab cache.
 */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* SLAB_PANIC: the kernel cannot run without this cache. */
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003979
3980#ifdef CONFIG_KGDB_KDB
3981#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	/* Remembers the last target so a repeated command can override
	 * the not-RUNNING safety check below. */
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	/* trylock, never block: kdb may have interrupted the lock holder. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	/* First attempt on a non-running task: warn and bail; reissuing
	 * the same command (new_t == 0) proceeds at the user's risk. */
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
4018#endif /* CONFIG_KGDB_KDB */