Skip to content

Commit

Permalink
Merge pull request #171 from domMayhew/support-postgres
Browse files Browse the repository at this point in the history
Create scaffolding for adding Postgres support
  • Loading branch information
KtorZ committed Jun 12, 2024
2 parents 2de4b1a + dbfa027 commit 5d8f52c
Show file tree
Hide file tree
Showing 17 changed files with 3,198 additions and 1,640 deletions.
19 changes: 14 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,12 @@ VERSION := $(shell cat package.yaml| grep "version:" | sed "s/[^0-9]*\([0-9]\)\(
# Release tag: a VERSION of exactly "0" denotes an unreleased build and is
# published under the "nightly" tag; anything else is used verbatim.
TAG := $(shell echo $(VERSION) | sed "s/^0$$/nightly/")
# Absolute path to the per-network configuration directory.
CONFIG := $(shell pwd)/config/network/$(NETWORK)
# Per-network cache directory under $HOME (presumably node/chain state — not
# visible from this file; confirm against the application's docs).
CACHEDIR := ${HOME}/.cache/kupo/${NETWORK}
# Cabal's dist-newstyle output tree for this platform/compiler/version combo.
BUILD_DIR_PATH := dist-newstyle/build/$(ARCH)-$(OS)/ghc-$(GHC)/kupo-$(VERSION)
# Directory that will hold the compiled 'kupo' executable inside that tree.
BIN_DIR_PATH := $(BUILD_DIR_PATH)/x/kupo/build/kupo

# Opt-in PostgreSQL backend: invoked as `make postgres=true <goal>`, this
# passes the 'postgres' cabal flag to `cabal build` / `cabal freeze`.
# PG_FLAG stays undefined (expands empty) otherwise.
ifeq ($(postgres), true)
PG_FLAG := -f postgres
endif

all: $(OUT)/bin/kupo \
$(OUT)/share/zsh/site-functions/_kupo \
Expand Down Expand Up @@ -46,10 +52,10 @@ $(OUT)/share/kupo/LICENSE:
@mkdir -p $(@D)
@cp LICENSE $@

dist-newstyle/build/$(ARCH)-$(OS)/ghc-$(GHC)/kupo-$(VERSION)/x/kupo/build/kupo/kupo:
@nix develop $(NIX_SHELL) $(NIX_OPTS) --command bash -c "cabal build kupo:exe:kupo"
# Compile the 'kupo' executable inside the Nix development shell. PG_FLAG is
# empty unless the build was invoked with `postgres=true`. NOTE(review): this
# target has no source prerequisites, so make will not rebuild it when sources
# change once the binary exists — cabal's own change tracking only runs if the
# file is missing; confirm this is intentional.
$(BIN_DIR_PATH)/kupo:
	@nix develop $(NIX_SHELL) $(NIX_OPTS) --command bash -c "cabal build kupo:exe:kupo $(PG_FLAG)"

$(OUT)/bin/kupo: dist-newstyle/build/$(ARCH)-$(OS)/ghc-$(GHC)/kupo-$(VERSION)/x/kupo/build/kupo/kupo
# Install the freshly-built binary under $(OUT)/bin, creating the directory
# on demand. With a single prerequisite, $^ and $< expand identically, so the
# echoed message is unchanged; copying straight to $@ names the destination
# explicitly instead of relying on cp's into-directory form.
$(OUT)/bin/kupo: $(BIN_DIR_PATH)/kupo
	@mkdir -p $(@D)
	@echo "$^ → $(@D)/kupo"
	@cp $< $@
Expand All @@ -67,7 +73,7 @@ ifeq ($(ARCH),x86_64)
else
nix develop $(NIX_SHELL) $(NIX_OPTS) --command bash -c "cat /nix/store/hviyb5sciblcyr5fc3vsqcwmfh1nz69w-cabal.project.local >> cabal.project.local"
endif
nix develop $(NIX_SHELL) $(NIX_OPTS) --command bash -c "cabal update && cabal freeze -f +production"
nix develop $(NIX_SHELL) $(NIX_OPTS) --command bash -c "cabal update && cabal freeze -f +production $(PG_FLAG)"

archive: kupo-$(TAG)-$(ARCH)-$(OS).tar.gz # Package the application as a tarball

Expand Down Expand Up @@ -97,7 +103,10 @@ doc: # Serve the rendered documentation on \033[0;33m<http://localhost:8000>\033
@cd docs && python -m SimpleHTTPServer

# Remove build artifacts.
#
# Fix: the previous single-command chain
#   rm A && echo || rm B && echo || rm C && echo || true
# left-associates in sh, so as soon as one removal succeeded every later `rm`
# was skipped while its "Removed ..." message was still printed — later
# artifacts were never actually deleted and the output lied about it. Each
# removal is now an independent recipe line; `|| true` keeps an already-clean
# tree (nothing to remove) from failing the target.
.PHONY: clean
clean: # Remove build artifacts
	@rm -r $(OUT) 2>/dev/null && echo "Removed $(OUT)" || true
	@rm -r $(BUILD_DIR_PATH) 2>/dev/null && echo "Removed $(BUILD_DIR_PATH)" || true
	@rm cabal.project.freeze 2>/dev/null && echo "Removed cabal.project.freeze" || true
	@echo "Finished cleaning."

clean-all: clean # Remove build artifacts & build cache
cabal clean
Expand Down
22 changes: 21 additions & 1 deletion kupo.cabal

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

87 changes: 20 additions & 67 deletions src/Kupo.hs
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,6 @@ import Kupo.Prelude
import Control.Exception.Safe
( isAsyncException
)
import Data.Pool
( defaultPoolConfig
, destroyAllResources
, newPool
, tryWithResource
, withResource
)
import GHC.Conc
( getNumCapabilities
)
import Kupo.App
( ChainSyncClient
, TraceConsumer (..)
Expand All @@ -72,14 +62,12 @@ import Kupo.App.Configuration
, startOrResume
)
import Kupo.App.Database
( copyDatabase
, newDBPool
)
import Kupo.App.Database.Types
( ConnectionType (..)
, Database (..)
, copyDatabase
, createShortLivedConnection
, newDatabaseFile
, newLock
, withLongLivedConnection
, withShortLivedConnection
, DBPool (..)
)
import Kupo.App.Health
( connectionStatusToggle
Expand Down Expand Up @@ -203,49 +191,31 @@ kupoWith tr withProducer withFetchBlock =
, configuration = config@Configuration
{ serverHost
, serverPort
, workDir
, databaseLocation
, inputManagement
, longestRollback
, deferIndexes
}
} <- ask

(maxConcurrentWriters, maxConcurrentReaders) <- liftIO getNumCapabilities <&> \n -> (n, 5 * n)
dbPool@DBPool { maxConcurrentReaders, maxConcurrentWriters } <- liftIO $ newDBPool
(tracerDatabase tr)
(isReadOnlyReplica config)
databaseLocation
longestRollback

liftIO $ logWith (tracerConfiguration tr) $
ConfigurationMaxConcurrency
{ maxConcurrentReaders
, maxConcurrentWriters = if isReadOnlyReplica config then 0 else maxConcurrentWriters
}

dbFile <- newDatabaseFile (tracerDatabase tr) workDir

lock <- liftIO newLock

readOnlyPool <- liftIO $ newPool $ defaultPoolConfig
(createShortLivedConnection (tracerDatabase tr) ReadOnly lock longestRollback dbFile)
(\Database{close} -> close)
600
maxConcurrentReaders

readWritePool <- liftIO $ newPool $ defaultPoolConfig
(createShortLivedConnection (tracerDatabase tr) ReadWrite lock longestRollback dbFile)
(\Database{close} -> close)
30
maxConcurrentWriters
ConfigurationMaxConcurrency
{ maxConcurrentReaders
, maxConcurrentWriters
}

let run action
| isReadOnlyReplica config =
-- NOTE: 'ShortLived' is a bad name here. What it really means is 'occasional
-- writers but mostly readers'. However, in the 'ReadOnlyReplica' mode we only
-- ever allow read-only connections and never perform a single write.
withShortLivedConnection
(tracerDatabase tr)
ReadOnly
lock
longestRollback
dbFile
(action InstallIndexesIfNotExist)
(withDatabaseBlocking dbPool) ReadOnly (action InstallIndexesIfNotExist)
| otherwise =
handle
(\NodeTipHasBeenReached{distance} -> do
Expand All @@ -255,16 +225,8 @@ kupoWith tr withProducer withFetchBlock =
(io deferIndexes)
where
io indexMode =
withLongLivedConnection
(tracerDatabase tr)
lock
longestRollback
dbFile
indexMode
(action indexMode)
`finally` do
destroyAllResources readOnlyPool
destroyAllResources readWritePool
(withDatabaseExclusiveWriter dbPool) indexMode (action indexMode)
`finally` destroyResources dbPool

liftIO $ handle (onUnknownException crashWith) $ run $ \indexMode db -> do
patterns <- newPatternsCache (tracerConfiguration tr) config db
Expand All @@ -278,16 +240,7 @@ kupoWith tr withProducer withFetchBlock =
-- HTTP Server
( httpServer
(tracerHttp tr)
(\case
ReadOnly ->
tryWithResource readOnlyPool
ReadWrite | isReadOnlyReplica config ->
const (fail "cannot acquire read/write connection on read-only replica.")
ReadWrite ->
tryWithResource readWritePool
WriteOnly ->
const (fail "impossible: tried to acquire WriteOnly database?")
)
(tryWithDatabase dbPool)
forceRollback
fetchBlock
patterns
Expand Down Expand Up @@ -319,7 +272,7 @@ kupoWith tr withProducer withFetchBlock =
(tracerGardener tr)
config
patterns
(withResource readWritePool)
(withDatabaseBlocking dbPool ReadWrite)
)

-- Block producer, fetching blocks from the network
Expand Down
2 changes: 1 addition & 1 deletion src/Kupo/App.hs
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ import Kupo.App.Configuration
( TraceConfiguration (..)
, parseNetworkParameters
)
import Kupo.App.Database
import Kupo.App.Database.Types
( DBTransaction
, Database (..)
)
Expand Down
2 changes: 1 addition & 1 deletion src/Kupo/App/Configuration.hs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ import Data.Aeson.Lens
( _String
, key
)
import Kupo.App.Database
import Kupo.App.Database.Types
( Database (..)
)
import Kupo.Control.MonadCatch
Expand Down
Loading

0 comments on commit 5d8f52c

Please sign in to comment.