Skip to content

Commit

Permalink
bump log priorities to warning, wrap all hiedb calls in runWithDb and…
Browse files Browse the repository at this point in the history
… writerThread with retries, promote time duration and maxRetryCount constants to top level
  • Loading branch information
eddiemundo committed Dec 27, 2021
1 parent c3fd649 commit 1bc3adc
Showing 1 changed file with 32 additions and 15 deletions.
47 changes: 32 additions & 15 deletions ghcide/session-loader/Development/IDE/Session.hs
Expand Up @@ -199,13 +199,13 @@ retryOnSqliteBusy logger hieDb maxDelay !baseDelay !maxRetryCount rng f = do
let (delay, newRng) = Random.randomR (0, newBaseDelay) rng
let newMaxRetryCount = maxRetryCount - 1
liftIO $ do
logInfo logger $ "Retrying - " <> makeLogMsgComponentsText (Right delay) newMaxRetryCount e
logWarning logger $ "Retrying - " <> makeLogMsgComponentsText (Right delay) newMaxRetryCount e
threadDelay delay
retryOnSqliteBusy logger hieDb maxDelay newBaseDelay newMaxRetryCount newRng f

| otherwise -> do
liftIO $ do
logInfo logger $ "Retries exhausted - " <> makeLogMsgComponentsText (Left baseDelay) maxRetryCount e
logWarning logger $ "Retries exhausted - " <> makeLogMsgComponentsText (Left baseDelay) maxRetryCount e
throwIO e

Right b -> pure b
Expand All @@ -224,36 +224,53 @@ retryOnSqliteBusy logger hieDb maxDelay !baseDelay !maxRetryCount rng f = do
in
T.intercalate ", " logMsgComponents

-- | One second, expressed in microseconds (the unit 'threadDelay' expects).
oneSecond :: Int
oneSecond = 1000 * 1000

-- | One millisecond, expressed in microseconds (the unit 'threadDelay' expects).
oneMillisecond :: Int
oneMillisecond = 10 ^ (3 :: Int)

-- | Default upper bound on how many times a hiedb call is retried
-- before the SQLITE_BUSY error is rethrown.
maxRetryCount :: Int
maxRetryCount = 10

-- | Wraps `withHieDb` to provide a database connection for reading, and a `HieWriterChan` for
-- writing. Actions are picked off one by one from the `HieWriterChan` and executed in serial
-- by a worker thread using a dedicated database connection.
-- This is done in order to serialize writes to the database, or else SQLite becomes unhappy
runWithDb :: Logger -> FilePath -> (WithHieDb -> IndexQueue -> IO ()) -> IO ()
runWithDb logger fp k = do
-- use non-deterministic seed because maybe multiple HLS start at same time
-- and send bursts of requests
rng <- Random.newStdGen
-- Delete the database if it has an incompatible schema version
withHieDb fp (const $ pure ())
withHieDb fp (const (pure ()) . makeWithRetryableHieDb rng)
`Safe.catch` \IncompatibleSchemaVersion{} -> removeFile fp
withHieDb fp $ \writedb -> do
initConn writedb
-- the type signature is necessary to avoid concretizing the RankNType
-- e.g. using it with initConn will set tyvar a to ()
let withRetryableWriteDb :: WithHieDb
withRetryableWriteDb = makeWithRetryableHieDb rng writedb
withRetryableWriteDb initConn

chan <- newTQueueIO
-- use newStdGen because what if multiple HLS start at same time and send bursts of requests
rng <- Random.newStdGen
withAsync (writerThread writedb chan rng) $ \_ -> do
withHieDb fp (\readDb -> k (retryOnSqliteBusy logger readDb oneSecond oneMillisecond maxRetryCount rng) chan)

withAsync (writerThread withRetryableWriteDb chan) $ \_ -> do
withHieDb fp (\readDb -> k (makeWithRetryableHieDb rng readDb) chan)
where
oneSecond = 1000000
oneMillisecond = 1000
maxRetryCount = 10
makeWithRetryableHieDb :: RandomGen g => g -> HieDb -> WithHieDb
makeWithRetryableHieDb rng hieDb = retryOnSqliteBusy logger hieDb oneSecond oneMillisecond maxRetryCount rng

writerThread db chan rng = do
writerThread :: WithHieDb -> IndexQueue -> IO ()
writerThread withRetryableWriteDb chan = do
-- Clear the index of any files that might have been deleted since the last run
deleteMissingRealFiles db
_ <- garbageCollectTypeNames db
_ <- withRetryableWriteDb deleteMissingRealFiles
_ <- withRetryableWriteDb garbageCollectTypeNames
forever $ do
k <- atomically $ readTQueue chan
k (retryOnSqliteBusy logger db oneSecond oneMillisecond maxRetryCount rng)
k withRetryableWriteDb
`Safe.catch` \e@SQLError{} -> do
logDebug logger $ T.pack $ "SQLite error in worker, ignoring: " ++ show e
`Safe.catchAny` \e -> do
Expand Down

0 comments on commit 1bc3adc

Please sign in to comment.