blob: 3e55bbd31ad1578f898f5558eae7e594ec7d2bc8 [file] [log] [blame]
Tejun Heob4a04ab2015-05-13 15:38:40 -04001/*
2 * linux/cgroup-defs.h - basic definitions for cgroup
3 *
 * This file provides basic types and interfaces.  Include this file directly
5 * only if necessary to avoid cyclic dependencies.
6 */
7#ifndef _LINUX_CGROUP_DEFS_H
8#define _LINUX_CGROUP_DEFS_H
9
10#include <linux/limits.h>
11#include <linux/list.h>
12#include <linux/idr.h>
13#include <linux/wait.h>
14#include <linux/mutex.h>
15#include <linux/rcupdate.h>
Elena Reshetova4b9502e62017-03-08 10:00:40 +020016#include <linux/refcount.h>
Tejun Heob4a04ab2015-05-13 15:38:40 -040017#include <linux/percpu-refcount.h>
Tejun Heo7d7efec2015-05-13 16:35:16 -040018#include <linux/percpu-rwsem.h>
Tejun Heo041cd642017-09-25 08:12:05 -070019#include <linux/u64_stats_sync.h>
Tejun Heob4a04ab2015-05-13 15:38:40 -040020#include <linux/workqueue.h>
Daniel Mack30070982016-11-23 16:52:26 +010021#include <linux/bpf-cgroup.h>
Tejun Heob4a04ab2015-05-13 15:38:40 -040022
23#ifdef CONFIG_CGROUPS
24
25struct cgroup;
26struct cgroup_root;
27struct cgroup_subsys;
28struct cgroup_taskset;
29struct kernfs_node;
30struct kernfs_ops;
31struct kernfs_open_file;
Arnd Bergmannc80ef9e2015-05-29 10:52:59 +020032struct seq_file;
Tejun Heob4a04ab2015-05-13 15:38:40 -040033
34#define MAX_CGROUP_TYPE_NAMELEN 32
35#define MAX_CGROUP_ROOT_NAMELEN 64
36#define MAX_CFTYPE_NAME 64
37
/*
 * Define the enumeration of all cgroup subsystems.  Each SUBSYS(name)
 * entry in <linux/cgroup_subsys.h> expands to "name_cgrp_id,", yielding
 * one enumerator per compiled-in controller; CGROUP_SUBSYS_COUNT is the
 * total number of controllers.
 */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
45
/* bits in struct cgroup_subsys_state flags field (mask values, OR-able) */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};
54
/*
 * Bits in struct cgroup flags field.  Unlike the CSS_* masks above,
 * these are bit *numbers* for use with bitops on cgroup->flags (which is
 * an unsigned long for exactly that reason).
 */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
};
66
/* cgroup_root->flags */
enum {
	/* bit 0 is currently unused */
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};
84
/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0), /* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1), /* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2), /* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16), /* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17), /* not on default hierarchy */
};
98
/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
};
108
/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;	/* CSS_* masks above */

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr and
	 * used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};
162
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem.  This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this cgroup group.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in.  Protected by
	 * css_set_rwsem, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 * (NOTE(review): sibling comments here cite css_set_lock, so the
	 * rwsem name looks stale — confirm against cgroup.c.)
	 */
	struct list_head tasks;
	struct list_head mg_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with.  The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot.  Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination.  Protected by cgroup_mutex.
	 */
	struct list_head mg_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set.  mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration.  mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to.  Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
257
/*
 * cgroup basic resource usage statistics.  Accounting is done per-cpu in
 * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
 * reads.
 *
 * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 */
struct cgroup_cpu_stat {
	/*
	 * ->sync protects all the current counters.  These are the only
	 * fields which get updated in the hot path.
	 */
	struct u64_stats_sync sync;
	struct task_cputime cputime;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct task_cputime last_cputime;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_cpu_stat_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
302
/* hierarchy-level accumulator fed by the per-cpu cgroup_cpu_stat above */
struct cgroup_stat {
	/* per-cpu statistics are collected into the following global counters */
	struct task_cputime cputime;
	struct prev_cputime prev_cputime;
};
308
struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
	 * new cgroup will be assigned with a smallest available ID.
	 *
	 * Allocating/Removing ID must be protected by cgroup_mutex.
	 */
	int id;

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descent tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descent cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descent cgroups.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the
	 * effective one which may have more subsystems enabled.
	 * Controller knobs are made available iff it's enabled in
	 * ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;

	/* cgroup basic resource statistics */
	struct cgroup_cpu_stat __percpu *cpu_stat;
	struct cgroup_stat pending_stat;	/* pending from children */
	struct cgroup_stat stat;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
432
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/*
	 * Must directly follow ->cgrp: provides the storage for the
	 * embedded root cgroup's flexible cgrp->ancestor_ids[0] entry.
	 */
	int cgrp_ancestor_id_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};
471
/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	/* optional kernfs open/release hooks */
	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lockdep_key;
#endif
};
560
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */
struct cgroup_subsys {
	/* css lifecycle callbacks, invoked by cgroup core */
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);

	/* task migration and fork/exit callbacks */
	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task);
	void (*cancel_fork)(struct task_struct *task);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*free)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which is transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * no-internal-process constraint are ignored and a threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy:1;
	bool warned_broken_hierarchy:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems.  When such subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
661
Tejun Heo1ed13282015-09-16 12:53:17 -0400662extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
663
/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.  May sleep.  Note that @tsk is currently
 * unused here — the read-side exclusion taken is global.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}
675
/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
Tejun Heo7d7efec2015-05-13 16:35:16 -0400686
687#else /* CONFIG_CGROUPS */
688
Aleksa Saraicb4a3162015-06-06 10:02:14 +1000689#define CGROUP_SUBSYS_COUNT 0
690
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	/* keep the may-sleep semantics of the CONFIG_CGROUPS version */
	might_sleep();
}
695
/* no-op counterpart of the stub cgroup_threadgroup_change_begin() */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
697
Tejun Heob4a04ab2015-05-13 15:38:40 -0400698#endif /* CONFIG_CGROUPS */
Tejun Heo7d7efec2015-05-13 16:35:16 -0400699
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500700#ifdef CONFIG_SOCK_CGROUP_DATA
701
/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  Clear bit indicates cgroup pointer while set bit prioidx and
 * classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bound and highly unlikely to be high, this seems to be the
 * better trade-off.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8	is_data;	/* overlays LSB of ->val; bit 0 set => data mode */
			u8	padding;
			u16	prioidx;
			u32	classid;
		} __packed;
#else
		struct {
			u32	classid;
			u16	prioidx;
			u8	padding;
			u8	is_data;	/* overlays LSB of ->val; bit 0 set => data mode */
		} __packed;
#endif
		u64	val;
	};
};
749
Tejun Heobd1060a2015-12-07 17:38:53 -0500750/*
751 * There's a theoretical window where the following accessors race with
752 * updaters and return part of the previous pointer as the prioidx or
753 * classid. Such races are short-lived and the result isn't critical.
754 */
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500755static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
756{
Tejun Heobd1060a2015-12-07 17:38:53 -0500757 /* fallback to 1 which is always the ID of the root cgroup */
758 return (skcd->is_data & 1) ? skcd->prioidx : 1;
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500759}
760
761static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
762{
Tejun Heobd1060a2015-12-07 17:38:53 -0500763 /* fallback to 0 which is the unconfigured default classid */
764 return (skcd->is_data & 1) ? skcd->classid : 0;
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500765}
766
Tejun Heobd1060a2015-12-07 17:38:53 -0500767/*
768 * If invoked concurrently, the updaters may clobber each other. The
769 * caller is responsible for synchronization.
770 */
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500771static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
772 u16 prioidx)
773{
Tejun Heoad2c8c72015-12-09 12:30:46 -0500774 struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
Tejun Heobd1060a2015-12-07 17:38:53 -0500775
776 if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
777 return;
778
779 if (!(skcd_buf.is_data & 1)) {
780 skcd_buf.val = 0;
781 skcd_buf.is_data = 1;
782 }
783
784 skcd_buf.prioidx = prioidx;
785 WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500786}
787
788static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
789 u32 classid)
790{
Tejun Heoad2c8c72015-12-09 12:30:46 -0500791 struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
Tejun Heobd1060a2015-12-07 17:38:53 -0500792
793 if (sock_cgroup_classid(&skcd_buf) == classid)
794 return;
795
796 if (!(skcd_buf.is_data & 1)) {
797 skcd_buf.val = 0;
798 skcd_buf.is_data = 1;
799 }
800
801 skcd_buf.classid = classid;
802 WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
Tejun Heo2a56a1f2015-12-07 17:38:52 -0500803}
804
805#else /* CONFIG_SOCK_CGROUP_DATA */
806
/* empty placeholder when CONFIG_SOCK_CGROUP_DATA is disabled */
struct sock_cgroup_data {
};
809
810#endif /* CONFIG_SOCK_CGROUP_DATA */
811
Tejun Heob4a04ab2015-05-13 15:38:40 -0400812#endif /* _LINUX_CGROUP_DEFS_H */