Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add CLI flags to config LevelDB table/total sizes #981

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions cmd/geth/chaincmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ func initGenesis(ctx *cli.Context) error {
defer stack.Close()

for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false, rawdb.ExtraDBConfig{})
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
Expand Down Expand Up @@ -229,7 +229,7 @@ func dumpGenesis(ctx *cli.Context) error {
// dump whatever already exists in the datadir
stack, _ := makeConfigNode(ctx)
for _, name := range []string{"chaindata", "lightchaindata"} {
db, err := stack.OpenDatabase(name, 0, 0, "", true)
db, err := stack.OpenDatabase(name, 0, 0, "", true, rawdb.ExtraDBConfig{})

if err != nil {
if !os.IsNotExist(err) {
Expand Down
2 changes: 1 addition & 1 deletion cmd/geth/dao_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
// Retrieve the DAO config flag from the database
path := filepath.Join(datadir, "geth", "chaindata")

db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false)
db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false, rawdb.ExtraDBConfig{})
if err != nil {
t.Fatalf("test %d: failed to open test database: %v", test, err)
}
Expand Down
40 changes: 38 additions & 2 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -461,6 +461,31 @@ var (
Value: 50,
Category: flags.PerfCategory,
}

LevelDbCompactionTableSizeFlag = &cli.Uint64Flag{
Name: "leveldb.compaction.table.size",
Usage: "LevelDB SSTable/file size in mebibytes",
Category: flags.PerfCategory,
}

LevelDbCompactionTableSizeMultiplierFlag = &cli.Float64Flag{
Name: "leveldb.compaction.table.size.multiplier",
Usage: "Multiplier on LevelDB SSTable/file size. Size for a level is determined by: `leveldb.compaction.table.size * (leveldb.compaction.table.size.multiplier ^ Level)`",
Category: flags.PerfCategory,
}

LevelDbCompactionTotalSizeFlag = &cli.Uint64Flag{
Name: "leveldb.compaction.total.size",
Usage: "Total size in mebibytes of SSTables in a given LevelDB level. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)`",
Category: flags.PerfCategory,
}

LevelDbCompactionTotalSizeMultiplierFlag = &cli.Float64Flag{
Name: "leveldb.compaction.total.size.multiplier",
Usage: "Multiplier on level size on LevelDB levels. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)`",
Category: flags.PerfCategory,
}

CacheTrieFlag = &cli.IntFlag{
Name: "cache.trie",
Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
Expand Down Expand Up @@ -2287,6 +2312,8 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.

err error
chainDb ethdb.Database

dbOptions = resolveExtraDBConfig(ctx)
)

switch {
Expand All @@ -2300,9 +2327,9 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.

chainDb = remotedb.New(client)
case ctx.String(SyncModeFlag.Name) == "light":
chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly, dbOptions)
default:
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly)
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly, dbOptions)
}

if err != nil {
Expand All @@ -2312,6 +2339,15 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
return chainDb
}

// resolveExtraDBConfig gathers the LevelDB compaction tuning flags from the
// CLI context into a rawdb.ExtraDBConfig. Flags the user did not set resolve
// to their zero values, which the leveldb layer interprets as "keep the
// engine's built-in default" (it only applies non-zero settings).
func resolveExtraDBConfig(ctx *cli.Context) rawdb.ExtraDBConfig {
	cfg := rawdb.ExtraDBConfig{}
	cfg.LevelDBCompactionTableSize = ctx.Uint64(LevelDbCompactionTableSizeFlag.Name)
	cfg.LevelDBCompactionTableSizeMultiplier = ctx.Float64(LevelDbCompactionTableSizeMultiplierFlag.Name)
	cfg.LevelDBCompactionTotalSize = ctx.Uint64(LevelDbCompactionTotalSizeFlag.Name)
	cfg.LevelDBCompactionTotalSizeMultiplier = ctx.Float64(LevelDbCompactionTotalSizeMultiplierFlag.Name)

	return cfg
}

func IsNetworkPreset(ctx *cli.Context) bool {
for _, flag := range NetworkFlags {
bFlag, _ := flag.(*cli.BoolFlag)
Expand Down
8 changes: 4 additions & 4 deletions core/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
} else {
dir := b.TempDir()

db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false, rawdb.ExtraDBConfig{})
if err != nil {
b.Fatalf("cannot create temporary database: %v", err)
}
Expand Down Expand Up @@ -296,7 +296,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
func benchWriteChain(b *testing.B, full bool, count uint64) {
for i := 0; i < b.N; i++ {
dir := b.TempDir()
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{})

if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
Expand All @@ -310,7 +310,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) {
func benchReadChain(b *testing.B, full bool, count uint64) {
dir := b.TempDir()

db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{})
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
Expand All @@ -325,7 +325,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{})
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
Expand Down
26 changes: 22 additions & 4 deletions core/rawdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -321,8 +321,8 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database {

// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
db, err := leveldb.New(file, cache, handles, namespace, readonly)
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool, extraDBConfig ExtraDBConfig) (ethdb.Database, error) {
db, err := leveldb.New(file, cache, handles, namespace, readonly, resolveLevelDBConfig(extraDBConfig))
if err != nil {
return nil, err
}
Expand All @@ -332,6 +332,15 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
return NewDatabase(db), nil
}

// resolveLevelDBConfig translates the backend-agnostic ExtraDBConfig into the
// leveldb-specific option struct consumed by leveldb.New. The mapping is
// one-to-one; zero values pass through unchanged.
func resolveLevelDBConfig(config ExtraDBConfig) leveldb.LevelDBConfig {
	var out leveldb.LevelDBConfig

	out.CompactionTableSize = config.LevelDBCompactionTableSize
	out.CompactionTableSizeMultiplier = config.LevelDBCompactionTableSizeMultiplier
	out.CompactionTotalSize = config.LevelDBCompactionTotalSize
	out.CompactionTotalSizeMultiplier = config.LevelDBCompactionTotalSizeMultiplier

	return out
}

const (
dbPebble = "pebble"
dbLeveldb = "leveldb"
Expand Down Expand Up @@ -366,6 +375,14 @@ type OpenOptions struct {
Cache int // the capacity(in megabytes) of the data caching
Handles int // number of files to be open simultaneously
ReadOnly bool
ExtraDBConfig ExtraDBConfig
}

// ExtraDBConfig carries optional tuning knobs for the backing key-value
// store. A zero value in any field means "keep the database engine's own
// default" — the leveldb layer only applies settings that are non-zero.
type ExtraDBConfig struct {
	LevelDBCompactionTableSize uint64 // LevelDB SSTable/file size in mebibytes
	LevelDBCompactionTableSizeMultiplier float64 // Multiplier on LevelDB SSTable/file size
	LevelDBCompactionTotalSize uint64 // Total size in mebibytes of SSTables in a given LevelDB level
	LevelDBCompactionTotalSizeMultiplier float64 // Multiplier on level size on LevelDB levels
}

// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
Expand Down Expand Up @@ -393,9 +410,10 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
return nil, fmt.Errorf("unknown db.engine %v", o.Type)
}

log.Info("Using leveldb as the backing database")
// Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
log.Info("Using leveldb as the backing database")

return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.ExtraDBConfig)
}

// Open opens both a disk-based key-value database such as leveldb or pebble, but also
Expand Down
10 changes: 10 additions & 0 deletions docs/cli/server.md
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,16 @@ The ```bor server``` command runs the Bor client.

- ```fdlimit```: Raise the open file descriptor resource limit (default = system fd limit) (default: 0)

### ExtraDB Options

- ```leveldb.compaction.table.size```: LevelDB SSTable/file size in mebibytes (default: 2)

- ```leveldb.compaction.table.size.multiplier```: Multiplier on LevelDB SSTable/file size. Size for a level is determined by: `leveldb.compaction.table.size * (leveldb.compaction.table.size.multiplier ^ Level)` (default: 1)

- ```leveldb.compaction.total.size```: Total size in mebibytes of SSTables in a given LevelDB level. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)` (default: 10)

- ```leveldb.compaction.total.size.multiplier```: Multiplier on level size on LevelDB levels. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)` (default: 10)

### JsonRPC Options

- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) (default: 50000000)
Expand Down
12 changes: 11 additions & 1 deletion eth/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)

// Assemble the Ethereum object
chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "ethereum/db/chaindata/", false)
extraDBConfig := resolveExtraDBConfig(config)
chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "ethereum/db/chaindata/", false, extraDBConfig)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -332,6 +333,15 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
return ethereum, nil
}

// resolveExtraDBConfig maps the LevelDB compaction settings from the eth
// node configuration onto the rawdb-level ExtraDBConfig that is handed to
// the database open path. Field-for-field copy; no validation is performed.
func resolveExtraDBConfig(config *ethconfig.Config) rawdb.ExtraDBConfig {
	extra := rawdb.ExtraDBConfig{}
	extra.LevelDBCompactionTableSize = config.LevelDbCompactionTableSize
	extra.LevelDBCompactionTableSizeMultiplier = config.LevelDbCompactionTableSizeMultiplier
	extra.LevelDBCompactionTotalSize = config.LevelDbCompactionTotalSize
	extra.LevelDBCompactionTotalSizeMultiplier = config.LevelDbCompactionTotalSizeMultiplier

	return extra
}

func makeExtraData(extra []byte) []byte {
if len(extra) == 0 {
// create default extradata
Expand Down
6 changes: 6 additions & 0 deletions eth/ethconfig/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,12 @@ type Config struct {
DatabaseCache int
DatabaseFreezer string

// Database - LevelDB options
LevelDbCompactionTableSize uint64
LevelDbCompactionTableSizeMultiplier float64
LevelDbCompactionTotalSize uint64
LevelDbCompactionTotalSizeMultiplier float64

TrieCleanCache int
TrieCleanCacheJournal string `toml:",omitempty"` // Disk journal directory for trie cache to survive node restarts
TrieCleanCacheRejournal time.Duration `toml:",omitempty"` // Time interval to regenerate the journal for clean cache
Expand Down
6 changes: 3 additions & 3 deletions eth/filters/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {

b.Log("Running bloombits benchmark section size:", sectionSize)

db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{})
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
Expand Down Expand Up @@ -145,7 +145,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{})
backend = &testBackend{db: db, sections: cnt}
sys = NewFilterSystem(backend, Config{})
}
Expand Down Expand Up @@ -187,7 +187,7 @@ func BenchmarkNoBloomBits(b *testing.B) {

b.Log("Running benchmark without bloombits")

db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{})
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
Expand Down
4 changes: 2 additions & 2 deletions eth/filters/filter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func makeReceipt(addr common.Address) *types.Receipt {

func BenchmarkFilters(b *testing.B) {
var (
db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false)
db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false, rawdb.ExtraDBConfig{})
_, sys = newTestFilterSystem(b, db, Config{})
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
Expand Down Expand Up @@ -106,7 +106,7 @@ func BenchmarkFilters(b *testing.B) {

func TestFilters(t *testing.T) {
var (
db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false)
db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false, rawdb.ExtraDBConfig{})
_, sys = newTestFilterSystem(t, db, Config{})
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
Expand Down
34 changes: 32 additions & 2 deletions ethdb/leveldb/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,16 @@ type Database struct {
log log.Logger // Contextual logger tracking the database path
}

// LevelDBConfig groups the compaction-related options that callers may
// override when opening a database via New. A zero value in any field leaves
// the corresponding goleveldb default in place (New only applies non-zero
// settings to opt.Options).
type LevelDBConfig struct {
	CompactionTableSize uint64 // LevelDB SSTable/file size in mebibytes
	CompactionTableSizeMultiplier float64 // Multiplier on LevelDB SSTable/file size
	CompactionTotalSize uint64 // Total size in mebibytes of SSTables in a given LevelDB level
	CompactionTotalSizeMultiplier float64 // Multiplier on level size on LevelDB levels
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
func New(file string, cache int, handles int, namespace string, readonly bool, config LevelDBConfig) (*Database, error) {
return NewCustom(file, namespace, func(options *opt.Options) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
Expand All @@ -100,6 +107,22 @@ func New(file string, cache int, handles int, namespace string, readonly bool) (
options.BlockCacheCapacity = cache / 2 * opt.MiB
options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally

if config.CompactionTableSize != 0 {
options.CompactionTableSize = int(config.CompactionTableSize * opt.MiB)
rroblak marked this conversation as resolved.
Show resolved Hide resolved
}

if config.CompactionTableSizeMultiplier != 0 {
options.CompactionTableSizeMultiplier = config.CompactionTableSizeMultiplier
}

if config.CompactionTotalSize != 0 {
options.CompactionTotalSize = int(config.CompactionTotalSize * opt.MiB)
}

if config.CompactionTotalSizeMultiplier != 0 {
options.CompactionTotalSizeMultiplier = config.CompactionTotalSizeMultiplier
}

if readonly {
options.ReadOnly = true
}
Expand All @@ -114,7 +137,14 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
logger := log.New("database", file)
usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2

logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()}
logCtx := []interface{}{
"cache", common.StorageSize(usedCache),
"handles", options.GetOpenFilesCacheCapacity(),
"compactionTableSize", options.CompactionTableSize,
"compactionTableSizeMultiplier", options.CompactionTableSizeMultiplier,
"compactionTotalSize", options.CompactionTotalSize,
"compactionTotalSizeMultiplier", options.CompactionTotalSizeMultiplier}

if options.ReadOnly {
logCtx = append(logCtx, "readonly", "true")
}
Expand Down
25 changes: 25 additions & 0 deletions internal/cli/server/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,8 @@ type Config struct {
// Cache has the cache related settings
Cache *CacheConfig `hcl:"cache,block" toml:"cache,block"`

ExtraDB *ExtraDBConfig `hcl:"leveldb,block" toml:"leveldb,block"`

// Account has the validator account related settings
Accounts *AccountsConfig `hcl:"accounts,block" toml:"accounts,block"`

Expand Down Expand Up @@ -551,6 +553,13 @@ type CacheConfig struct {
FDLimit int `hcl:"fdlimit,optional" toml:"fdlimit,optional"`
}

// ExtraDBConfig holds the user-facing LevelDB compaction settings parsed
// from the server's HCL/TOML configuration (the `leveldb` block). Values are
// later copied onto the eth config in buildEth; sizes are in mebibytes.
type ExtraDBConfig struct {
	LevelDbCompactionTableSize uint64 `hcl:"compactiontablesize,optional" toml:"compactiontablesize,optional"`
	LevelDbCompactionTableSizeMultiplier float64 `hcl:"compactiontablesizemultiplier,optional" toml:"compactiontablesizemultiplier,optional"`
	LevelDbCompactionTotalSize uint64 `hcl:"compactiontotalsize,optional" toml:"compactiontotalsize,optional"`
	LevelDbCompactionTotalSizeMultiplier float64 `hcl:"compactiontotalsizemultiplier,optional" toml:"compactiontotalsizemultiplier,optional"`
}

type AccountsConfig struct {
// Unlock is the list of addresses to unlock in the node
Unlock []string `hcl:"unlock,optional" toml:"unlock,optional"`
Expand Down Expand Up @@ -742,6 +751,14 @@ func DefaultConfig() *Config {
TrieTimeout: 60 * time.Minute,
FDLimit: 0,
},
ExtraDB: &ExtraDBConfig{
// These are LevelDB defaults, specifying here for clarity in code and in logging.
// See: https://github.com/syndtr/goleveldb/blob/126854af5e6d8295ef8e8bee3040dd8380ae72e8/leveldb/opt/options.go
rroblak marked this conversation as resolved.
Show resolved Hide resolved
LevelDbCompactionTableSize: 2, // MiB
LevelDbCompactionTableSizeMultiplier: 1,
LevelDbCompactionTotalSize: 10, // MiB
LevelDbCompactionTotalSizeMultiplier: 10,
},
Accounts: &AccountsConfig{
Unlock: []string{},
PasswordFile: "",
Expand Down Expand Up @@ -1101,6 +1118,14 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*
n.TriesInMemory = c.Cache.TriesInMemory
}

// LevelDB
{
n.LevelDbCompactionTableSize = c.ExtraDB.LevelDbCompactionTableSize
n.LevelDbCompactionTableSizeMultiplier = c.ExtraDB.LevelDbCompactionTableSizeMultiplier
n.LevelDbCompactionTotalSize = c.ExtraDB.LevelDbCompactionTotalSize
n.LevelDbCompactionTotalSizeMultiplier = c.ExtraDB.LevelDbCompactionTotalSizeMultiplier
}

n.RPCGasCap = c.JsonRPC.GasCap
if n.RPCGasCap != 0 {
log.Info("Set global gas cap", "cap", n.RPCGasCap)
Expand Down