rcu-tasks: Split ->trc_reader_need_end

This commit splits ->trc_reader_need_end by using the rcu_special union.
This change permits readers to check whether a memory barrier is
required without any added overhead in the common case where no such
barrier is needed. This commit also adds the read-side checking.
Later commits will add the machinery to properly set the new
->trc_reader_special.b.need_mb field.
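
For reference, the split reuses the rcu_special bitfield union from
include/linux/sched.h, roughly as sketched below (sketch only: the
need_mb bit is the new piece, the other bits are pre-existing and the
exact layout may differ):

	/* Sketch; see include/linux/sched.h for the real definition. */
	union rcu_special {
		struct {
			u8	blocked;
			u8	need_qs;	/* GP kthread needs a quiescent state. */
			u8	exp_hint;	/* Hint for performance. */
			u8	need_mb;	/* Readers need smp_mb(). */
		} b;				/* Individual bits. */
		u32 s;				/* All bits at once, for cheap checks. */
	};

	/* In struct task_struct, under CONFIG_TASKS_TRACE_RCU: */
	int			trc_reader_nesting;
	union rcu_special	trc_reader_special;	/* Replaces ->trc_reader_need_end. */
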
This commit also makes rcu_read_unlock_trace_special() tolerate nested
read-side critical sections within interrupt and NMI handlers.
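
The read-side checking itself lives in the rcu_read_unlock_trace()
fast path in include/linux/rcupdate_trace.h rather than in the hunks
below; a minimal sketch of that path (lockdep annotations omitted)
looks something like this:

	static inline void rcu_read_unlock_trace(void)
	{
		struct task_struct *t = current;
		int nesting;

		nesting = READ_ONCE(t->trc_reader_nesting) - 1;
		if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
			/* Nothing special pending, or still nested: just record. */
			WRITE_ONCE(t->trc_reader_nesting, nesting);
			return;
		}
		/* Outermost reader with .need_qs or .need_mb set: slow path. */
		rcu_read_unlock_trace_special(t, nesting);
	}

A single READ_ONCE() of ->trc_reader_special.s covers both .need_qs and
.need_mb, which keeps the common no-barrier case free of added overhead.
Passing the new nesting value into rcu_read_unlock_trace_special() lets
it update .need_qs before storing ->trc_reader_nesting, which is what
allows readers nested within interrupt and NMI handlers to see
consistent state.
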
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

diff --git a/kernel/fork.c b/kernel/fork.c
index 72e93962..96eb4b5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1685,6 +1685,7 @@
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
p->trc_reader_nesting = 0;
+ p->trc_reader_special.s = 0;
INIT_LIST_HEAD(&p->trc_holdout_list);
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index eeac4a1..17b1b9a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -723,10 +723,17 @@
"RCU Tasks Trace");
/* If we are the last reader, wake up the grace-period kthread. */
-void rcu_read_unlock_trace_special(struct task_struct *t)
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
- WRITE_ONCE(t->trc_reader_need_end, false);
- if (atomic_dec_and_test(&trc_n_readers_need_end))
+ int nq = t->trc_reader_special.b.need_qs;
+
+ if (t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers.
+ // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
+ if (nq)
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
+ if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
wake_up(&trc_wait);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
@@ -777,8 +784,8 @@
// Get here if the task is in a read-side critical section. Set
// its state so that it will awaken the grace-period kthread upon
// exit from that critical section.
- WARN_ON_ONCE(t->trc_reader_need_end);
- WRITE_ONCE(t->trc_reader_need_end, true);
+ WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
reset_ipi:
// Allow future IPIs to be sent on CPU and for task.
@@ -804,8 +811,8 @@
// exit from that critical section.
if (unlikely(t->trc_reader_nesting)) {
atomic_inc(&trc_n_readers_need_end); // One more to wait on.
- WARN_ON_ONCE(t->trc_reader_need_end);
- WRITE_ONCE(t->trc_reader_need_end, true);
+ WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
}
return true;
}
@@ -884,7 +891,7 @@
static void rcu_tasks_trace_pertask(struct task_struct *t,
struct list_head *hop)
{
- WRITE_ONCE(t->trc_reader_need_end, false);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
WRITE_ONCE(t->trc_reader_checked, false);
t->trc_ipi_to_cpu = -1;
trc_wait_for_one_reader(t, hop);
@@ -916,7 +923,7 @@
".i"[is_idle_task(t)],
".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
t->trc_reader_nesting,
- " N"[!!t->trc_reader_need_end],
+ " N"[!!t->trc_reader_special.b.need_qs],
cpu);
sched_show_task(t);
}
@@ -980,11 +987,11 @@
break; // Count reached zero.
// Stall warning time, so make a list of the offenders.
for_each_process_thread(g, t)
- if (READ_ONCE(t->trc_reader_need_end))
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
trc_add_holdout(t, &holdouts);
firstreport = true;
list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
- if (READ_ONCE(t->trc_reader_need_end)) {
+ if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
show_stalled_task_trace(t, &firstreport);
trc_del_holdout(t);
}
@@ -1003,8 +1010,8 @@
WRITE_ONCE(t->trc_reader_checked, true);
WARN_ON_ONCE(t->trc_reader_nesting);
WRITE_ONCE(t->trc_reader_nesting, 0);
- if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
- rcu_read_unlock_trace_special(t);
+ if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
+ rcu_read_unlock_trace_special(t, 0);
}
/**