Skip to content

Commit 6d3aadf

Browse files
KAGA-KOKO authored and johnstultz-work committed
timekeeping: Restructure the timekeeper some more
Access to time requires touching two cachelines at minimum 1) The timekeeper data structure 2) The clocksource data structure The access to the clocksource data structure can be avoided as almost all clocksource implementations ignore the argument to the read callback, which is a pointer to the clocksource. But the core needs to touch it to access the members @read and @mask. So we are better off by copying the @read function pointer and the @mask from the clocksource to the core data structure itself. For the most used ktime_get() access all required data including the @read and @mask copies fits together with the sequence counter into a single 64 byte cacheline. For the other time access functions we touch in the current code three cache lines in the worst case. But with the clocksource data copies we can reduce that to two adjacent cachelines, which is more efficient than disjunct cache lines. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: John Stultz <john.stultz@linaro.org>
1 parent 4a0e637 commit 6d3aadf

File tree

2 files changed

+19
-20
lines changed

2 files changed

+19
-20
lines changed

include/linux/timekeeper_internal.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,10 @@
2929
struct timekeeper {
3030
/* Current clocksource used for timekeeping. */
3131
struct clocksource *clock;
32+
/* Read function of @clock */
33+
cycle_t (*read)(struct clocksource *cs);
34+
/* Bitmask for two's complement subtraction of non 64bit counters */
35+
cycle_t mask;
3236
/* Last cycle value */
3337
cycle_t cycle_last;
3438
/* NTP adjusted clock multiplier */

kernel/time/timekeeping.c

Lines changed: 15 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,9 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
121121

122122
old_clock = tk->clock;
123123
tk->clock = clock;
124-
tk->cycle_last = clock->read(clock);
124+
tk->read = clock->read;
125+
tk->mask = clock->mask;
126+
tk->cycle_last = tk->read(clock);
125127

126128
/* Do the ns -> cycle conversion first, using original mult */
127129
tmp = NTP_INTERVAL_LENGTH;
@@ -174,15 +176,13 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
174176
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
175177
{
176178
cycle_t cycle_now, delta;
177-
struct clocksource *clock;
178179
s64 nsec;
179180

180181
/* read clocksource: */
181-
clock = tk->clock;
182-
cycle_now = clock->read(clock);
182+
cycle_now = tk->read(tk->clock);
183183

184184
/* calculate the delta since the last update_wall_time: */
185-
delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
185+
delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
186186

187187
nsec = delta * tk->mult + tk->xtime_nsec;
188188
nsec >>= tk->shift;
@@ -193,16 +193,15 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
193193

194194
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
195195
{
196+
struct clocksource *clock = tk->clock;
196197
cycle_t cycle_now, delta;
197-
struct clocksource *clock;
198198
s64 nsec;
199199

200200
/* read clocksource: */
201-
clock = tk->clock;
202-
cycle_now = clock->read(clock);
201+
cycle_now = tk->read(clock);
203202

204203
/* calculate the delta since the last update_wall_time: */
205-
delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
204+
delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
206205

207206
/* convert delta to nanoseconds. */
208207
nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -337,13 +336,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
337336
*/
338337
static void timekeeping_forward_now(struct timekeeper *tk)
339338
{
339+
struct clocksource *clock = tk->clock;
340340
cycle_t cycle_now, delta;
341-
struct clocksource *clock;
342341
s64 nsec;
343342

344-
clock = tk->clock;
345-
cycle_now = clock->read(clock);
346-
delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
343+
cycle_now = tk->read(clock);
344+
delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
347345
tk->cycle_last = cycle_now;
348346

349347
tk->xtime_nsec += delta * tk->mult;
@@ -1019,7 +1017,7 @@ static void timekeeping_resume(void)
10191017
* The less preferred source will only be tried if there is no better
10201018
* usable source. The rtc part is handled separately in rtc core code.
10211019
*/
1022-
cycle_now = clock->read(clock);
1020+
cycle_now = tk->read(clock);
10231021
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
10241022
cycle_now > tk->cycle_last) {
10251023
u64 num, max = ULLONG_MAX;
@@ -1028,7 +1026,7 @@ static void timekeeping_resume(void)
10281026
s64 nsec = 0;
10291027

10301028
cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
1031-
clock->mask);
1029+
tk->mask);
10321030

10331031
/*
10341032
* "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1415,7 +1413,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
14151413
*/
14161414
void update_wall_time(void)
14171415
{
1418-
struct clocksource *clock;
14191416
struct timekeeper *real_tk = &tk_core.timekeeper;
14201417
struct timekeeper *tk = &shadow_timekeeper;
14211418
cycle_t offset;
@@ -1429,13 +1426,11 @@ void update_wall_time(void)
14291426
if (unlikely(timekeeping_suspended))
14301427
goto out;
14311428

1432-
clock = real_tk->clock;
1433-
14341429
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
14351430
offset = real_tk->cycle_interval;
14361431
#else
1437-
offset = clocksource_delta(clock->read(clock), tk->cycle_last,
1438-
clock->mask);
1432+
offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last,
1433+
tk->mask);
14391434
#endif
14401435

14411436
/* Check if there's really nothing to do */

0 commit comments

Comments (0)