Skip to content

Commit

Permalink
WT-2924 Ensure we are doing eviction when threads are waiting for it. (
Browse files Browse the repository at this point in the history
…#3056)

Fix a bug in scrub step-down, where it could set the scrub target below the eviction dirty target, leading to an unexpected eviction state.
  • Loading branch information
michaelcahill authored and agorrod committed Sep 23, 2016
1 parent e75ef57 commit d85e189
Show file tree
Hide file tree
Showing 4 changed files with 77 additions and 35 deletions.
13 changes: 8 additions & 5 deletions src/evict/evict_lru.c
Expand Up @@ -431,7 +431,6 @@ __evict_update_work(WT_SESSION_IMPL *session)
{
WT_CACHE *cache;
WT_CONNECTION_IMPL *conn;
double dirty_trigger;
uint64_t bytes_inuse, bytes_max, dirty_inuse;

conn = S2C(session);
Expand All @@ -456,15 +455,13 @@ __evict_update_work(WT_SESSION_IMPL *session)
bytes_inuse = __wt_cache_bytes_inuse(cache);
if (bytes_inuse > (cache->eviction_target * bytes_max) / 100)
F_SET(cache, WT_CACHE_EVICT_CLEAN);
if (bytes_inuse > (cache->eviction_trigger * bytes_max) / 100)
if (__wt_eviction_clean_needed(session, NULL))
F_SET(cache, WT_CACHE_EVICT_CLEAN_HARD);

dirty_inuse = __wt_cache_dirty_leaf_inuse(cache);
if (dirty_inuse > (cache->eviction_dirty_target * bytes_max) / 100)
F_SET(cache, WT_CACHE_EVICT_DIRTY);
if ((dirty_trigger = cache->eviction_scrub_limit) < 1.0)
dirty_trigger = (double)cache->eviction_dirty_trigger;
if (dirty_inuse > (uint64_t)(dirty_trigger * bytes_max) / 100)
if (__wt_eviction_dirty_needed(session, NULL))
F_SET(cache, WT_CACHE_EVICT_DIRTY_HARD);

/*
Expand Down Expand Up @@ -497,6 +494,12 @@ __evict_update_work(WT_SESSION_IMPL *session)
F_CLR(cache, WT_CACHE_EVICT_CLEAN | WT_CACHE_EVICT_CLEAN_HARD);
}

/* If threads are blocked by eviction we should be looking for pages. */
WT_ASSERT(session, !F_ISSET(cache, WT_CACHE_EVICT_CLEAN_HARD) ||
F_ISSET(cache, WT_CACHE_EVICT_CLEAN));
WT_ASSERT(session, !F_ISSET(cache, WT_CACHE_EVICT_DIRTY_HARD) ||
F_ISSET(cache, WT_CACHE_EVICT_DIRTY));

WT_STAT_CONN_SET(session, cache_eviction_state,
F_MASK(cache, WT_CACHE_EVICT_MASK));

Expand Down
2 changes: 1 addition & 1 deletion src/include/cache.h
Expand Up @@ -175,7 +175,7 @@ struct __wt_cache {
#define WT_CACHE_EVICT_CLEAN_HARD 0x002 /* Clean % blocking app threads */
#define WT_CACHE_EVICT_DIRTY 0x004 /* Evict dirty pages */
#define WT_CACHE_EVICT_DIRTY_HARD 0x008 /* Dirty % blocking app threads */
#define WT_CACHE_EVICT_SCRUB 0x010 /* Scrub dirty pages pages */
#define WT_CACHE_EVICT_SCRUB 0x010 /* Scrub dirty pages */
#define WT_CACHE_EVICT_URGENT 0x020 /* Pages are in the urgent queue */
#define WT_CACHE_EVICT_ALL (WT_CACHE_EVICT_CLEAN | WT_CACHE_EVICT_DIRTY)
#define WT_CACHE_EVICT_MASK 0x0FF
Expand Down
94 changes: 66 additions & 28 deletions src/include/cache.i
Expand Up @@ -193,7 +193,7 @@ __wt_cache_bytes_other(WT_CACHE *cache)
* __wt_session_can_wait --
* Return if a session available for a potentially slow operation.
*/
static inline int
static inline bool
__wt_session_can_wait(WT_SESSION_IMPL *session)
{
/*
Expand All @@ -202,17 +202,71 @@ __wt_session_can_wait(WT_SESSION_IMPL *session)
* the system cache.
*/
if (!F_ISSET(session, WT_SESSION_CAN_WAIT))
return (0);
return (false);

/*
* LSM sets the no-eviction flag when holding the LSM tree lock, in that
* case, or when holding the schema lock, we don't want to highjack the
* thread for eviction.
*/
if (F_ISSET(session, WT_SESSION_NO_EVICTION | WT_SESSION_LOCKED_SCHEMA))
return (0);
return (!F_ISSET(
session, WT_SESSION_NO_EVICTION | WT_SESSION_LOCKED_SCHEMA));
}

/*
 * __wt_eviction_clean_needed --
 *	Return if an application thread should do eviction due to the total
 *	volume of data in cache.
 */
static inline bool
__wt_eviction_clean_needed(WT_SESSION_IMPL *session, u_int *pct_fullp)
{
	WT_CACHE *cache;
	uint64_t bytes_inuse, bytes_max;

	cache = S2C(session)->cache;

	/*
	 * Avoid division by zero if the cache size has not yet been set in a
	 * shared cache.
	 */
	bytes_max = S2C(session)->cache_size + 1;
	bytes_inuse = __wt_cache_bytes_inuse(cache);

	/* Optionally report cache fullness as a percentage of the maximum. */
	if (pct_fullp != NULL)
		*pct_fullp = (u_int)((100 * bytes_inuse) / bytes_max);

	/* Application threads pitch in once usage exceeds the trigger. */
	return (bytes_inuse > (cache->eviction_trigger * bytes_max) / 100);
}

/*
 * __wt_eviction_dirty_needed --
 *	Return if an application thread should do eviction due to the total
 *	volume of dirty data in cache.
 */
static inline bool
__wt_eviction_dirty_needed(WT_SESSION_IMPL *session, u_int *pct_fullp)
{
	WT_CACHE *cache;
	double dirty_trigger;
	uint64_t dirty_inuse, bytes_max;

	cache = S2C(session)->cache;

	/*
	 * Avoid division by zero if the cache size has not yet been set in a
	 * shared cache.
	 */
	bytes_max = S2C(session)->cache_size + 1;
	dirty_inuse = __wt_cache_dirty_leaf_inuse(cache);

	/* Optionally report dirty fullness as a percentage of the maximum. */
	if (pct_fullp != NULL)
		*pct_fullp = (u_int)((100 * dirty_inuse) / bytes_max);

	/*
	 * An active scrub (e.g., checkpoint step-down) lowers the effective
	 * dirty trigger; values below 1.0 mean no scrub target is set, so
	 * fall back to the configured dirty trigger.
	 */
	if ((dirty_trigger = cache->eviction_scrub_limit) < 1.0)
		dirty_trigger = (double)cache->eviction_dirty_trigger;

	return (dirty_inuse > (uint64_t)(dirty_trigger * bytes_max) / 100);
}

/*
Expand All @@ -223,42 +277,30 @@ __wt_session_can_wait(WT_SESSION_IMPL *session)
static inline bool
__wt_eviction_needed(WT_SESSION_IMPL *session, bool busy, u_int *pct_fullp)
{
WT_CONNECTION_IMPL *conn;
WT_CACHE *cache;
double dirty_trigger;
uint64_t bytes_inuse, bytes_max, dirty_inuse;
u_int pct_dirty, pct_full;
bool clean_needed, dirty_needed;

conn = S2C(session);
cache = conn->cache;
cache = S2C(session)->cache;

/*
* If the connection is closing we do not need eviction from an
* application thread. The eviction subsystem is already closed.
*/
if (F_ISSET(conn, WT_CONN_CLOSING))
if (F_ISSET(S2C(session), WT_CONN_CLOSING))
return (false);

/*
* Avoid division by zero if the cache size has not yet been set in a
* shared cache.
*/
bytes_max = conn->cache_size + 1;
bytes_inuse = __wt_cache_bytes_inuse(cache);
dirty_inuse = __wt_cache_dirty_leaf_inuse(cache);
clean_needed = __wt_eviction_clean_needed(session, &pct_full);
dirty_needed = __wt_eviction_dirty_needed(session, &pct_dirty);

/*
* Calculate the cache full percentage; anything over the trigger means
* we involve the application thread.
*/
if (pct_fullp != NULL) {
pct_full = (u_int)((100 * bytes_inuse) / bytes_max);
pct_dirty = (u_int)((100 * dirty_inuse) / bytes_max);

if (pct_fullp != NULL)
*pct_fullp = (u_int)WT_MAX(0, 100 - WT_MIN(
(int)cache->eviction_trigger - (int)pct_full,
(int)cache->eviction_dirty_trigger - (int)pct_dirty));
}

/*
* Only check the dirty trigger when the session is not busy.
Expand All @@ -268,11 +310,7 @@ __wt_eviction_needed(WT_SESSION_IMPL *session, bool busy, u_int *pct_fullp)
* The next transaction in this session will not be able to start until
* the cache is under the limit.
*/
if ((dirty_trigger = cache->eviction_scrub_limit) < 1.0)
dirty_trigger = (double)cache->eviction_dirty_trigger;
return (bytes_inuse > (cache->eviction_trigger * bytes_max) / 100 ||
(!busy &&
dirty_inuse > (uint64_t)(dirty_trigger * bytes_max) / 100));
return (clean_needed || (!busy && dirty_needed));
}

/*
Expand Down
3 changes: 2 additions & 1 deletion src/txn/txn_ckpt.c
Expand Up @@ -423,7 +423,8 @@ __checkpoint_reduce_dirty_cache(WT_SESSION_IMPL *session)
* level.
*/
__wt_sleep(0, 10 * stepdown_us);
cache->eviction_scrub_limit = current_dirty - delta;
cache->eviction_scrub_limit =
WT_MAX(cache->eviction_dirty_target, current_dirty - delta);
WT_STAT_CONN_SET(session, txn_checkpoint_scrub_target,
cache->eviction_scrub_limit);
WT_RET(__wt_epoch(session, &last));
Expand Down

0 comments on commit d85e189

Please sign in to comment.