locking/lockdep: Untangle xhlock history save/restore from task independence

Where XHLOCK_{SOFT,HARD} are save/restore points in the xhlocks[] to
ensure that temporal IRQ events don't interact with task state, the
XHLOCK_PROC is a fundamentally different beast that just happens to
share the interface.
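
Roughly, those save/restore points bracket an IRQ like this (a
simplified sketch of the retained interface; the real code is in the
lockdep.c hunk below):

	crossrelease_hist_start(XHLOCK_HARD);	/* save xhlock_idx, hist_id */
	/* IRQ handler runs, pushing entries into xhlocks[] */
	crossrelease_hist_end(XHLOCK_HARD);	/* rewind; erase those entries */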

The purpose of XHLOCK_PROC is to annotate independent execution inside
one task. For example, in workqueues each work should appear to run in
its own 'pristine' 'task'.

Remove XHLOCK_PROC in favour of its own interface to avoid confusion.
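
Condensed from the workqueue.c hunk below, the per-work annotation
goes from a start/end pair to a single call at the invariant point:

	-	crossrelease_hist_start(XHLOCK_PROC, true);
	+	lockdep_invariant_state(true);
		worker->current_func(work);
	-	crossrelease_hist_end(XHLOCK_PROC);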

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: kernel-team@lge.com
Cc: oleg@redhat.com
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/20170829085939.ggmb6xiohw67micb@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f73ca59..44c8d0d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4623,13 +4623,8 @@
 	/*
 	 * The lock history for each syscall should be independent. So wipe the
 	 * slate clean on return to userspace.
-	 *
-	 * crossrelease_hist_end() works well here even when getting here
-	 * without starting (i.e. just after forking), because it rolls back
-	 * the index to point to the last entry, which is already invalid.
 	 */
-	crossrelease_hist_end(XHLOCK_PROC);
-	crossrelease_hist_start(XHLOCK_PROC, false);
+	lockdep_invariant_state(false);
 }
 
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4723,19 +4718,47 @@
 }
 
 /*
- * Lock history stacks; we have 3 nested lock history stacks:
+ * Lock history stacks; we have 2 nested lock history stacks:
  *
  *   HARD(IRQ)
  *   SOFT(IRQ)
- *   PROC(ess)
  *
  * The thing is that once we complete a HARD/SOFT IRQ the future task locks
  * should not depend on any of the locks observed while running the IRQ.  So
  * what we do is rewind the history buffer and erase all our knowledge of that
  * temporal event.
- *
- * The PROCess one is special though; it is used to annotate independence
- * inside a task.
+ */
+
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (!cur->xhlocks)
+		return;
+
+	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+	cur->hist_id_save[c]    = cur->hist_id;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		unsigned int idx = cur->xhlock_idx_hist[c];
+		struct hist_lock *h = &xhlock(idx);
+
+		cur->xhlock_idx = idx;
+
+		/* Check if the ring was overwritten. */
+		if (h->hist_id != cur->hist_id_save[c])
+			invalidate_xhlock(h);
+	}
+}
+
+/*
+ * lockdep_invariant_state() is used to annotate independence inside a task, to
+ * make one task look like multiple independent 'tasks'.
  *
  * Take for instance workqueues; each work is independent of the last. The
  * completion of a future work does not depend on the completion of a past work
@@ -4758,40 +4781,14 @@
  * entry. Similarly, independence per-definition means it does not depend on
  * prior state.
  */
-void crossrelease_hist_start(enum xhlock_context_t c, bool force)
+void lockdep_invariant_state(bool force)
 {
-	struct task_struct *cur = current;
-
-	if (!cur->xhlocks)
-		return;
-
 	/*
 	 * We call this at an invariant point, no current state, no history.
+	 * Verify the former, enforce the latter.
 	 */
-	if (c == XHLOCK_PROC) {
-		/* verified the former, ensure the latter */
-		WARN_ON_ONCE(!force && cur->lockdep_depth);
-		invalidate_xhlock(&xhlock(cur->xhlock_idx));
-	}
-
-	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-	cur->hist_id_save[c]    = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
+	WARN_ON_ONCE(!force && current->lockdep_depth);
+	invalidate_xhlock(&xhlock(current->xhlock_idx));
 }
 
 static int cross_lock(struct lockdep_map *lock)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c033189..ab3c0dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2094,8 +2094,8 @@
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
-	 * Strictly speaking we should do start(PROC) without holding any
-	 * locks, that is, before these two lock_map_acquire()'s.
+	 * Strictly speaking we should mark the invariant state without holding
+	 * any locks, that is, before these two lock_map_acquire()'s.
 	 *
 	 * However, that would result in:
 	 *
@@ -2107,14 +2107,14 @@
 	 * Which would create W1->C->W1 dependencies, even though there is no
 	 * actual deadlock possible. There are two solutions, using a
 	 * read-recursive acquire on the work(queue) 'locks', but this will then
-	 * hit the lockdep limitation on recursive locks, or simly discard
+	 * hit the lockdep limitation on recursive locks, or simply discard
 	 * these locks.
 	 *
 	 * AFAICT there is no possible deadlock scenario between the
 	 * flush_work() and complete() primitives (except for single-threaded
 	 * workqueues), so hiding them isn't a problem.
 	 */
-	crossrelease_hist_start(XHLOCK_PROC, true);
+	lockdep_invariant_state(true);
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*
@@ -2122,7 +2122,6 @@
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work);
-	crossrelease_hist_end(XHLOCK_PROC);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);