/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family;

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

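/*
 * Allocate a new sk_buff of @size and start a genetlink message for @cmd
 * in it.  For replies to user requests the header is built from @info;
 * for kernel-initiated exit messages @info is NULL and the sequence
 * number comes from the per-cpu taskstats_seqnum counter.
 */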
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

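/*
 * Fill @stats with the delay accounting, basic accounting and extended
 * accounting data for a single task @tsk.
 */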
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

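/*
 * Look up the task with (virtual) pid @pid and fill @stats for it,
 * holding a reference on the task across the call.  Returns -ESRCH if
 * no such task exists.
 */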
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

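/*
 * Fill @stats with the accumulated statistics of thread group @tgid:
 * the already-exited members recorded in signal->stats plus the
 * per-task data of all live, non-exiting threads.
 */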
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

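/*
 * Fold an exiting task's per-task statistics into the per-tgid
 * structure hanging off tsk->signal, if one has been allocated.
 */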
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

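/*
 * Register or deregister the listener identified by netlink port id
 * @pid on the per-cpu listener lists of every cpu in @mask.  Only
 * permitted from the initial user and pid namespaces.
 */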
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

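/*
 * Parse the cpulist string carried in netlink attribute @na into @mask.
 * Returns 1 if the attribute is absent, 0 on success, or a negative
 * error code.
 */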
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

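/*
 * Append a nested PID/TGID aggregate to @skb: the pid/tgid attribute
 * followed by space reserved for a struct taskstats.  Returns a pointer
 * to the reserved taskstats area, or NULL on failure.
 */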
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
				sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

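/*
 * CGROUPSTATS_CMD_GET handler: build cgroupstats for the cgroup
 * directory referenced by the CGROUPSTATS_CMD_ATTR_FD file descriptor
 * and send them back to the requester.
 */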
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

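/*
 * Handlers for the REGISTER/DEREGISTER_CPUMASK attributes: parse the
 * requested cpumask and add or remove the sender as an exit-data
 * listener on those cpus.
 */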
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

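/*
 * Worst-case payload size of a taskstats reply: a pid/tgid attribute,
 * a 64-bit aligned taskstats attribute and the enclosing nest header.
 */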
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(sizeof(struct taskstats)) +
		nla_total_size(0);

	return size;
}

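/*
 * TASKSTATS_CMD_ATTR_PID handler: reply with the statistics of a
 * single task.
 */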
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

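/*
 * TASKSTATS_CMD_ATTR_TGID handler: reply with the accumulated
 * statistics of a whole thread group.
 */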
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

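/*
 * TASKSTATS_CMD_GET dispatcher: the command attribute present in the
 * request selects which operation is performed.
 */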
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

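/*
 * Allocate the per-thread-group taskstats structure on first use,
 * guarding against a concurrent allocation by another thread.
 * Returns the current sig->stats pointer, which may be NULL if the
 * allocation failed or the group has a single thread.
 */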
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};

static struct genl_family family = {
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
	.module		= THIS_MODULE,
	.ops		= taskstats_ops,
	.n_ops		= ARRAY_SIZE(taskstats_ops),
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);