@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
+/*
+ * Check for consistent NMI safety.
+ */
+static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
+{
+	int nmi_safe_mask = 1 << nmi_safe;
+	int old_nmi_safe_mask;
+	struct srcu_data *sdp;
+
+	if (!IS_ENABLED(CONFIG_PROVE_RCU))
+		return;
+	sdp = raw_cpu_ptr(ssp->sda);
+	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
+	if (!old_nmi_safe_mask) {
+		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
+		return;
+	}
+	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
+}
+
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
+	srcu_check_nmi_safety(ssp, false);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
+	srcu_check_nmi_safety(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -659,14 +681,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
  * srcu_struct, but in an NMI-safe manner using RMW atomics.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
 {
 	int idx;
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	atomic_long_inc(&sdp->srcu_lock_count[idx]);
 	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -676,12 +700,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
 {
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
 	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
 
@@ -1121,7 +1147,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	int ss_state;
 
 	check_init_srcu_struct(ssp);
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1154,7 +1180,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 	return s;
 }
 
@@ -1458,13 +1484,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted.  */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
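For readers following along: the check added above amounts to a small per-CPU, per-srcu_struct state machine. srcu_nmi_safety starts at 0 (no readers seen yet), the first reader records 1 << nmi_safe (0x1 for plain readers, 0x2 for NMI-safe readers), and any later reader of the other flavor trips the WARN_ONCE(). The standalone userspace sketch below mimics that logic for a single CPU; the SRCU_NMI_* macro names and check_nmi_safety() are illustrative only and do not appear in the patch.

/*
 * Standalone sketch of the per-CPU state machine implemented by
 * srcu_check_nmi_safety().  Plain userspace C; the SRCU_NMI_* names
 * and check_nmi_safety() are illustrative, not from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define SRCU_NMI_UNKNOWN	0x0	/* No readers seen yet. */
#define SRCU_NMI_UNSAFE		0x1	/* 1 << false: plain readers seen. */
#define SRCU_NMI_SAFE		0x2	/* 1 << true: NMI-safe readers seen. */

static int srcu_nmi_safety = SRCU_NMI_UNKNOWN;	/* Per-CPU in the kernel. */

static void check_nmi_safety(bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;

	if (srcu_nmi_safety == SRCU_NMI_UNKNOWN) {
		srcu_nmi_safety = nmi_safe_mask;	/* First reader decides. */
		return;
	}
	if (srcu_nmi_safety != nmi_safe_mask)	/* Mixed flavors: complain. */
		fprintf(stderr, "warning: old state %d new state %d\n",
			srcu_nmi_safety, nmi_safe_mask);
}

int main(void)
{
	check_nmi_safety(false);	/* Models srcu_read_lock(): records 0x1. */
	check_nmi_safety(false);	/* Same flavor, no warning. */
	check_nmi_safety(true);		/* Models srcu_read_lock_nmisafe(): warns. */
	return 0;
}

Note also why the internal callers changed above (srcu_gp_start_if_needed() and srcu_barrier()) pass chknmisafe == false: they use the NMI-safe primitives for their own bookkeeping regardless of which flavor the srcu_struct's readers use, so recording their accesses would falsely trip the warning.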