Allow individual database configuration (#8)
* Allow individual database configuration

* Update README.md
evgeniy-scherbina authored Jul 19, 2024
1 parent 15bb6b0 commit a2f11f6
Showing 3 changed files with 111 additions and 33 deletions.
27 changes: 26 additions & 1 deletion README.md
@@ -35,6 +35,31 @@ There are 2 main scenarios:
- opendb loads stored rocksdb configuration and starts with it
- opendb overrides options that are explicitly specified in appOpts (app.toml)

#### Individual database configuration

`app.toml` example:
```toml
[rocksdb]
enable-metrics = true
report-metrics-interval-secs = 15
max-open-files = 16384
...

[rocksdb.blockstore]
max-open-files = -1
block_size = 4096

[rocksdb.tx_index]
max-open-files = -1
block_size = 4096
```

The `[rocksdb]` section contains the default (fallback) database configuration, which can be overridden with a database-specific configuration.

This is a convenient approach because the rocksdb configuration is fairly extensive (around 30 parameters), while usually only a few of them are overridden.

So a standard configuration can be defined once in the `[rocksdb]` section, and only a few parameters overridden in the database-specific sections.
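
For illustration, here is a minimal sketch of the precedence rule (the `resolve` helper and the flattened option map are hypothetical, not part of the library API): a database-specific key `rocksdb.<db>.<param>` wins, otherwise the lookup falls back to `rocksdb.<param>`.

```go
package main

import "fmt"

// resolve is a hypothetical helper mirroring the precedence rule:
// the database-specific key "rocksdb.<dbName>.<param>" takes priority
// over the fallback key "rocksdb.<param>".
func resolve(opts map[string]interface{}, dbName, param string) interface{} {
	if v, ok := opts[fmt.Sprintf("rocksdb.%v.%v", dbName, param)]; ok {
		return v
	}
	return opts[fmt.Sprintf("rocksdb.%v", param)]
}

func main() {
	// flattened view of the example app.toml above
	opts := map[string]interface{}{
		"rocksdb.enable-metrics":            true,
		"rocksdb.max-open-files":            16384,
		"rocksdb.blockstore.max-open-files": -1,
		"rocksdb.blockstore.block_size":     4096,
	}

	fmt.Println(resolve(opts, "blockstore", "max-open-files")) // -1: database-specific override
	fmt.Println(resolve(opts, "blockstore", "enable-metrics")) // true: falls back to [rocksdb]
}
```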

### List of databases:

| Name | Subsystem | IAVL V1 size as of 10.5 millions blocks | IAVL V1 number of SST files as of 10.5 millions blocks |
@@ -45,7 +70,7 @@ There are 2 main scenarios:
| state.db | Ethermint | 282 GB | 5.5k |
| tx_index.db | Ethermint | 504 GB | 11.6k |
| evidence.db | Ethermint | 28 MB | 270 |
| evm_indexer.db | Ethermint | 2.2 GB | 230 |
| evmindexer.db | Ethermint | 2.2 GB | 230 |

### List of reported rocksdb metrics:

95 changes: 63 additions & 32 deletions opendb_rocksdb.go
@@ -42,39 +42,39 @@ const (

DefaultColumnFamilyName = "default"

enableMetricsOptName = "rocksdb.enable-metrics"
reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
enableMetricsOptName = "enable-metrics"
reportMetricsIntervalSecsOptName = "report-metrics-interval-secs"
defaultReportMetricsIntervalSecs = 15

maxOpenFilesDBOptName = "rocksdb.max-open-files"
maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes"
allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads"
useFsyncDBOptName = "rocksdb.use_fsync"
useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex"
bytesPerSyncDBOptName = "rocksdb.bytes_per_sync"
maxBackgroundJobsDBOptName = "rocksdb.max-background-jobs"

writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
numLevelsCFOptName = "rocksdb.num-levels"
maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number"
minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge"
maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base"
maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier"
targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base"
targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier"
level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger"

blockCacheSizeBBTOOptName = "rocksdb.block_cache_size"
bitsPerKeyBBTOOptName = "rocksdb.bits_per_key"
blockSizeBBTOOptName = "rocksdb.block_size"
cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks"
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
formatVersionBBTOOptName = "rocksdb.format_version"

asyncIOReadOptName = "rocksdb.read-async-io"
maxOpenFilesDBOptName = "max-open-files"
maxFileOpeningThreadsDBOptName = "max-file-opening-threads"
tableCacheNumshardbitsDBOptName = "table_cache_numshardbits"
allowMMAPWritesDBOptName = "allow_mmap_writes"
allowMMAPReadsDBOptName = "allow_mmap_reads"
useFsyncDBOptName = "use_fsync"
useAdaptiveMutexDBOptName = "use_adaptive_mutex"
bytesPerSyncDBOptName = "bytes_per_sync"
maxBackgroundJobsDBOptName = "max-background-jobs"

writeBufferSizeCFOptName = "write-buffer-size"
numLevelsCFOptName = "num-levels"
maxWriteBufferNumberCFOptName = "max_write_buffer_number"
minWriteBufferNumberToMergeCFOptName = "min_write_buffer_number_to_merge"
maxBytesForLevelBaseCFOptName = "max_bytes_for_level_base"
maxBytesForLevelMultiplierCFOptName = "max_bytes_for_level_multiplier"
targetFileSizeBaseCFOptName = "target_file_size_base"
targetFileSizeMultiplierCFOptName = "target_file_size_multiplier"
level0FileNumCompactionTriggerCFOptName = "level0_file_num_compaction_trigger"
level0SlowdownWritesTriggerCFOptName = "level0_slowdown_writes_trigger"

blockCacheSizeBBTOOptName = "block_cache_size"
bitsPerKeyBBTOOptName = "bits_per_key"
blockSizeBBTOOptName = "block_size"
cacheIndexAndFilterBlocksBBTOOptName = "cache_index_and_filter_blocks"
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "pin_l0_filter_and_index_blocks_in_cache"
formatVersionBBTOOptName = "format_version"

asyncIOReadOptName = "read-async-io"
)

// AppOptions is the same interface as provided by cosmos-sdk, see for details:
@@ -84,9 +84,40 @@ type AppOptions interface {
Get(string) interface{}
}

// rocksDBOptions implements the AppOptions interface.
// It does so by wrapping another AppOptions while also taking dbName into account.
type rocksDBOptions struct {
appOpts AppOptions
dbName string
}

func newRocksDBOptions(appOpts AppOptions, dbName string) *rocksDBOptions {
return &rocksDBOptions{
appOpts: appOpts,
dbName: dbName,
}
}

// Get constructs database-specific and fallback keys and uses them to look up the value in the underlying AppOptions.
// The database-specific key takes precedence over the fallback key.
func (opts *rocksDBOptions) Get(key string) interface{} {
// look up the value using the database-specific key first
dbSpecificKey := fmt.Sprintf("rocksdb.%v.%v", opts.dbName, key)
if value := opts.appOpts.Get(dbSpecificKey); value != nil {
return value
}

// get value using fallback key
fallbackKey := fmt.Sprintf("rocksdb.%v", key)
return opts.appOpts.Get(fallbackKey)
}

func OpenDB(appOpts AppOptions, dataDir string, dbName string, backendType dbm.BackendType) (dbm.DB, error) {
// wrap AppOptions with rocksDBOptions to make sure dbName is taken into account when applying configuration;
// this enables individual (per-database) configuration
rocksDBOpts := newRocksDBOptions(appOpts, dbName)
if backendType == dbm.RocksDBBackend {
return openRocksdb(dataDir, dbName, appOpts)
return openRocksdb(dataDir, dbName, rocksDBOpts)
}

return dbm.NewDB(dbName, backendType, dataDir)
22 changes: 22 additions & 0 deletions opendb_rocksdb_test.go
@@ -28,6 +28,28 @@ func (m *mockAppOptions) Get(key string) interface{} {
return m.opts[key]
}

func TestRocksDBOptions(t *testing.T) {
mockAppOptions := newMockAppOptions(map[string]interface{}{
// fallback configuration
"rocksdb.max-open-files": 16_384,
"rocksdb.block_size": 16_384,

// database-specific configuration
"rocksdb.tx_index.max-open-files": 4096,
"rocksdb.tx_index.block_size": 4096,
})

// there is no database-specific configuration for the application database, so the fallback configuration is used
appDBOpts := newRocksDBOptions(mockAppOptions, "application")
require.Equal(t, 16_384, appDBOpts.Get("max-open-files"))
require.Equal(t, 16_384, appDBOpts.Get("block_size"))

// there is a database-specific configuration for the tx_index database, so it is used instead of the fallback configuration
txIndexDBOpts := newRocksDBOptions(mockAppOptions, "tx_index")
require.Equal(t, 4096, txIndexDBOpts.Get("max-open-files"))
require.Equal(t, 4096, txIndexDBOpts.Get("block_size"))
}

func TestOpenRocksdb(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
