Skip to content

Commit 2d20ef4

Browse files
committed
srcu: Check for consistent per-CPU per-srcu_struct NMI safety
This commit adds runtime checks to verify that a given srcu_struct uses
consistent NMI-safe (or not) read-side primitives on a per-CPU basis.

Link: https://lore.kernel.org/all/[email protected]/
Signed-off-by: Paul E. McKenney <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: John Ogness <[email protected]>
Cc: Petr Mladek <[email protected]>
1 parent d4841a5 commit 2d20ef4

File tree

3 files changed

+41
-10
lines changed

3 files changed

+41
-10
lines changed

include/linux/srcu.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,8 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
5959
void cleanup_srcu_struct(struct srcu_struct *ssp);
6060
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
6161
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
62-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
63-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
62+
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe) __acquires(ssp);
63+
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe) __releases(ssp);
6464
void synchronize_srcu(struct srcu_struct *ssp);
6565
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
6666
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
@@ -180,7 +180,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
180180
int retval;
181181

182182
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
183-
retval = __srcu_read_lock_nmisafe(ssp);
183+
retval = __srcu_read_lock_nmisafe(ssp, true);
184184
else
185185
retval = __srcu_read_lock(ssp);
186186
rcu_lock_acquire(&(ssp)->dep_map);
@@ -225,7 +225,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
225225
WARN_ON_ONCE(idx & ~0x1);
226226
rcu_lock_release(&(ssp)->dep_map);
227227
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
228-
__srcu_read_unlock_nmisafe(ssp, idx);
228+
__srcu_read_unlock_nmisafe(ssp, idx, true);
229229
else
230230
__srcu_read_unlock(ssp, idx);
231231
}

include/linux/srcutree.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ struct srcu_data {
2525
/* Read-side state. */
2626
atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
2727
atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
28+
int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
2829

2930
/* Update-side state. */
3031
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -42,6 +43,10 @@ struct srcu_data {
4243
struct srcu_struct *ssp;
4344
};
4445

46+
#define SRCU_NMI_UNKNOWN 0x0
47+
#define SRCU_NMI_NMI_UNSAFE 0x1
48+
#define SRCU_NMI_NMI_SAFE 0x2
49+
4550
/*
4651
* Node in SRCU combining tree, similar in function to rcu_data.
4752
*/

kernel/rcu/srcutree.c

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
626626
}
627627
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
628628

629+
/*
 * Check for consistent NMI safety.
 *
 * Maps the nmi_safe flag onto the SRCU_NMI_* state values defined in
 * srcutree.h (1 << false == 0x1 == SRCU_NMI_NMI_UNSAFE, 1 << true ==
 * 0x2 == SRCU_NMI_NMI_SAFE), latches that state into the current CPU's
 * srcu_data the first time a reader is seen there, and WARNs once if a
 * later reader on that CPU uses the other flavor of read-side primitive.
 */
static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;	/* false -> 0x1, true -> 0x2. */
	int old_nmi_safe_mask;
	struct srcu_data *sdp;

	/* Debug-only consistency check: compiles away unless CONFIG_PROVE_RCU. */
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	sdp = raw_cpu_ptr(ssp->sda);
	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
	if (!old_nmi_safe_mask) {
		/* First reader on this CPU (SRCU_NMI_UNKNOWN): record its flavor. */
		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
		return;
	}
	/* Subsequent readers on this CPU must match the recorded flavor. */
	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
}
648+
629649
/*
630650
* Counts the new reader in the appropriate per-CPU element of the
631651
* srcu_struct.
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
638658
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
639659
this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
640660
smp_mb(); /* B */ /* Avoid leaking the critical section. */
661+
srcu_check_nmi_safety(ssp, false);
641662
return idx;
642663
}
643664
EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
651672
{
652673
smp_mb(); /* C */ /* Avoid leaking the critical section. */
653674
this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
675+
srcu_check_nmi_safety(ssp, false);
654676
}
655677
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
656678

@@ -659,14 +681,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
659681
* srcu_struct, but in an NMI-safe manner using RMW atomics.
660682
* Returns an index that must be passed to the matching srcu_read_unlock().
661683
*/
662-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
684+
/*
 * NMI-safe counterpart of __srcu_read_lock(): counts the new reader in
 * the current CPU's srcu_data using RMW atomics instead of this_cpu_inc().
 * Returns the index that must be passed to the matching unlock.  When
 * chknmisafe is set (the srcu_read_lock_nmisafe() path), also verifies
 * per-CPU NMI-safety consistency; internal update-side callers such as
 * srcu_gp_start_if_needed() and srcu_barrier() pass false to stay out of
 * the consistency bookkeeping.
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */ /* Avoid leaking the critical section. */
	if (chknmisafe)
		srcu_check_nmi_safety(ssp, true);
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -676,12 +700,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
676700
* element of the srcu_struct. Note that this may well be a different
677701
* CPU than that which was incremented by the corresponding srcu_read_lock().
678702
*/
679-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
703+
/*
 * NMI-safe counterpart of __srcu_read_unlock(): removes the reader from
 * the current CPU's srcu_data using an RMW atomic.  Note that this may
 * well be a different CPU than the one that did the corresponding lock.
 * When chknmisafe is set, also verifies per-CPU NMI-safety consistency;
 * internal update-side callers pass false.
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */ /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
	if (chknmisafe)
		srcu_check_nmi_safety(ssp, true);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
687713

@@ -1121,7 +1147,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11211147
int ss_state;
11221148

11231149
check_init_srcu_struct(ssp);
1124-
idx = __srcu_read_lock_nmisafe(ssp);
1150+
idx = __srcu_read_lock_nmisafe(ssp, false);
11251151
ss_state = smp_load_acquire(&ssp->srcu_size_state);
11261152
if (ss_state < SRCU_SIZE_WAIT_CALL)
11271153
sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1154,7 +1180,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11541180
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
11551181
else if (needexp)
11561182
srcu_funnel_exp_start(ssp, sdp_mynode, s);
1157-
__srcu_read_unlock_nmisafe(ssp, idx);
1183+
__srcu_read_unlock_nmisafe(ssp, idx, false);
11581184
return s;
11591185
}
11601186

@@ -1458,13 +1484,13 @@ void srcu_barrier(struct srcu_struct *ssp)
14581484
/* Initial count prevents reaching zero until all CBs are posted. */
14591485
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
14601486

1461-
idx = __srcu_read_lock_nmisafe(ssp);
1487+
idx = __srcu_read_lock_nmisafe(ssp, false);
14621488
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
14631489
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
14641490
else
14651491
for_each_possible_cpu(cpu)
14661492
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1467-
__srcu_read_unlock_nmisafe(ssp, idx);
1493+
__srcu_read_unlock_nmisafe(ssp, idx, false);
14681494

14691495
/* Remove the initial count, at which point reaching zero can happen. */
14701496
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))

0 commit comments

Comments
 (0)