Skip to content

Commit 80793c3

Browse files
a-darwish authored and Peter Zijlstra committed
seqlock: Introduce seqcount_latch_t
Latch sequence counters are a multiversion concurrency control mechanism where the seqcount_t counter even/odd value is used to switch between two copies of protected data. This allows the seqcount_t read path to safely interrupt its write side critical section (e.g. from NMIs). Initially, latch sequence counters were implemented as a single write function above plain seqcount_t: raw_write_seqcount_latch(). The read side was expected to use plain seqcount_t raw_read_seqcount(). A specialized latch read function, raw_read_seqcount_latch(), was later added. It became the standardized way for latch read paths. Due to the dependent load, it has one read memory barrier less than the plain seqcount_t raw_read_seqcount() API. Only raw_write_seqcount_latch() and raw_read_seqcount_latch() should be used with latch sequence counters. Having *unique* read and write path APIs means that latch sequence counters are actually a data type of their own -- just inappropriately overloading plain seqcount_t. Introduce seqcount_latch_t. This adds type-safety and ensures that only the correct latch-safe APIs are to be used. Not to break bisection, let the latch APIs also accept plain seqcount_t or seqcount_raw_spinlock_t. After converting all call sites to seqcount_latch_t, only that new data type will be allowed. References: 9b0fd80 ("seqcount: Add raw_write_seqcount_latch()") References: 7fc2632 ("seqlock: Introduce raw_read_seqcount_latch()") References: aadd6e5 ("time/sched_clock: Use raw_read_seqcount_latch()") Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200827114044.11173-4-a.darwish@linutronix.de
1 parent 6446a51 commit 80793c3

File tree

2 files changed

+91
-31
lines changed

2 files changed

+91
-31
lines changed

Documentation/locking/seqlock.rst

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,24 @@ with the associated LOCKTYPE lock acquired.
139139

140140
Read path: same as in :ref:`seqcount_t`.
141141

142+
143+
.. _seqcount_latch_t:
144+
145+
Latch sequence counters (``seqcount_latch_t``)
146+
----------------------------------------------
147+
148+
Latch sequence counters are a multiversion concurrency control mechanism
149+
where the embedded seqcount_t counter even/odd value is used to switch
150+
between two copies of protected data. This allows the sequence counter
151+
read path to safely interrupt its own write side critical section.
152+
153+
Use seqcount_latch_t when the write side sections cannot be protected
154+
from interruption by readers. This is typically the case when the read
155+
side can be invoked from NMI handlers.
156+
157+
Check `raw_write_seqcount_latch()` for more information.
158+
159+
142160
.. _seqlock_t:
143161

144162
Sequential locks (``seqlock_t``)

include/linux/seqlock.h

Lines changed: 73 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -587,34 +587,76 @@ static inline void write_seqcount_t_invalidate(seqcount_t *s)
587587
kcsan_nestable_atomic_end();
588588
}
589589

590-
/**
591-
* raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
592-
* @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
590+
/*
591+
* Latch sequence counters (seqcount_latch_t)
593592
*
594-
* Use seqcount_t latching to switch between two storage places protected
595-
* by a sequence counter. Doing so allows having interruptible, preemptible,
596-
* seqcount_t write side critical sections.
593+
* A sequence counter variant where the counter even/odd value is used to
594+
* switch between two copies of protected data. This allows the read path,
595+
* typically NMIs, to safely interrupt the write side critical section.
597596
*
598-
* Check raw_write_seqcount_latch() for more details and a full reader and
599-
* writer usage example.
597+
* As the write sections are fully preemptible, no special handling for
598+
* PREEMPT_RT is needed.
599+
*/
600+
typedef struct {
601+
seqcount_t seqcount;
602+
} seqcount_latch_t;
603+
604+
/**
605+
* SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
606+
* @seq_name: Name of the seqcount_latch_t instance
607+
*/
608+
#define SEQCNT_LATCH_ZERO(seq_name) { \
609+
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
610+
}
611+
612+
/**
613+
* seqcount_latch_init() - runtime initializer for seqcount_latch_t
614+
* @s: Pointer to the seqcount_latch_t instance
615+
*/
616+
static inline void seqcount_latch_init(seqcount_latch_t *s)
617+
{
618+
seqcount_init(&s->seqcount);
619+
}
620+
621+
/**
622+
* raw_read_seqcount_latch() - pick even/odd latch data copy
623+
* @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
624+
*
625+
* See raw_write_seqcount_latch() for details and a full reader/writer
626+
* usage example.
600627
*
601628
* Return: sequence counter raw value. Use the lowest bit as an index for
602-
* picking which data copy to read. The full counter value must then be
603-
* checked with read_seqcount_retry().
629+
* picking which data copy to read. The full counter must then be checked
630+
* with read_seqcount_latch_retry().
604631
*/
605-
#define raw_read_seqcount_latch(s) \
606-
raw_read_seqcount_t_latch(__seqcount_ptr(s))
632+
#define raw_read_seqcount_latch(s) \
633+
({ \
634+
/* \
635+
* Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
636+
* Due to the dependent load, a full smp_rmb() is not needed. \
637+
*/ \
638+
_Generic(*(s), \
639+
seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
640+
seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
641+
seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
642+
})
607643

608-
static inline int raw_read_seqcount_t_latch(seqcount_t *s)
644+
/**
645+
* read_seqcount_latch_retry() - end a seqcount_latch_t read section
646+
* @s: Pointer to seqcount_latch_t
647+
* @start: count, from raw_read_seqcount_latch()
648+
*
649+
* Return: true if a read section retry is required, else false
650+
*/
651+
static inline int
652+
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
609653
{
610-
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
611-
int seq = READ_ONCE(s->sequence); /* ^^^ */
612-
return seq;
654+
return read_seqcount_retry(&s->seqcount, start);
613655
}
614656

615657
/**
616-
* raw_write_seqcount_latch() - redirect readers to even/odd copy
617-
* @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
658+
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
659+
* @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
618660
*
619661
* The latch technique is a multiversion concurrency control method that allows
620662
* queries during non-atomic modifications. If you can guarantee queries never
@@ -633,7 +675,7 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
633675
* The basic form is a data structure like::
634676
*
635677
* struct latch_struct {
636-
* seqcount_t seq;
678+
* seqcount_latch_t seq;
637679
* struct data_struct data[2];
638680
* };
639681
*
@@ -643,13 +685,13 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
643685
* void latch_modify(struct latch_struct *latch, ...)
644686
* {
645687
* smp_wmb(); // Ensure that the last data[1] update is visible
646-
* latch->seq++;
688+
* latch->seq.sequence++;
647689
* smp_wmb(); // Ensure that the seqcount update is visible
648690
*
649691
* modify(latch->data[0], ...);
650692
*
651693
* smp_wmb(); // Ensure that the data[0] update is visible
652-
* latch->seq++;
694+
* latch->seq.sequence++;
653695
* smp_wmb(); // Ensure that the seqcount update is visible
654696
*
655697
* modify(latch->data[1], ...);
@@ -668,8 +710,8 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
668710
* idx = seq & 0x01;
669711
* entry = data_query(latch->data[idx], ...);
670712
*
671-
* // read_seqcount_retry() includes needed smp_rmb()
672-
* } while (read_seqcount_retry(&latch->seq, seq));
713+
* // This includes needed smp_rmb()
714+
* } while (read_seqcount_latch_retry(&latch->seq, seq));
673715
*
674716
* return entry;
675717
* }
@@ -693,14 +735,14 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
693735
* When data is a dynamic data structure; one should use regular RCU
694736
* patterns to manage the lifetimes of the objects within.
695737
*/
696-
#define raw_write_seqcount_latch(s) \
697-
raw_write_seqcount_t_latch(__seqcount_ptr(s))
698-
699-
static inline void raw_write_seqcount_t_latch(seqcount_t *s)
700-
{
701-
smp_wmb(); /* prior stores before incrementing "sequence" */
702-
s->sequence++;
703-
smp_wmb(); /* increment "sequence" before following stores */
738+
#define raw_write_seqcount_latch(s) \
739+
{ \
740+
smp_wmb(); /* prior stores before incrementing "sequence" */ \
741+
_Generic(*(s), \
742+
seqcount_t: ((seqcount_t *)s)->sequence++, \
743+
seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
744+
seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
745+
smp_wmb(); /* increment "sequence" before following stores */ \
704746
}
705747

706748
/*

0 commit comments

Comments
 (0)