From 5067f27ae0738de15e7e63756f6cff52daf8a2d9 Mon Sep 17 00:00:00 2001
From: Joshua Noble
Date: Fri, 22 Dec 2023 15:09:08 -0500
Subject: [PATCH] Add support for flatfs by default (#55)

* Add support (and documentation) for configurable blockstores

* Add support for FlatFS

* Enable FlatFS by default

---------

Co-authored-by: Adin Schmahmann
---
 README.md           |  24 +++++++++--
 docs/blockstores.md |  33 ++++++++++++++
 gc.go               |   1 -
 go.mod              |   2 +
 go.sum              |  13 ++++++
 main.go             |   9 +++-
 setup.go            | 103 ++++++++++++++++++++++++--------------
 7 files changed, 133 insertions(+), 52 deletions(-)
 create mode 100644 docs/blockstores.md

diff --git a/README.md b/README.md
index 16121e5..78e4089 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@
 ci
 coverage
 GitHub release
-godoc reference
+godoc reference


@@ -29,9 +29,9 @@ Rainbow uses the same Go code as the HTTP gateway in Kubo, but is fully speciali
 * Rainbow acts as DHT and Bitswap client only. Rainbow is not a server for the
   network.
 * Rainbow does not pin, or permanently store any content. It is just meant
-  to act as gateway to content present in the network. GC strategy
+  to act as gateway to content present in the network. GC strategy
 * Rainbow settings are optimized for production deployments and streamlined
-  for specific choices (badger datastore, writethrough uncached blockstore
+  for specific choices (flatfs datastore, writethrough uncached blockstore
   etc.)
 * Denylist and denylist subscription support is included.
 * And more to come...
@@ -89,6 +89,24 @@ Denylists can be manually placed in the `$RAINBOW_DATADIR/denylists` folder too.
 See [NoPFS](https://github.com/ipfs-shipyard/nopfs) for an explanation of the denylist format. Note that denylists should only be appended to while Rainbow is running. Editing differently, or adding new denylist files, should be done with Rainbow stopped.
 
+## Blockstores
+
+Rainbow ships with a number of possible blockstores for caching data locally.
+Because Rainbow, as a gateway-only IPFS implementation, is not designed for long-term data storage, there are no
+long-term guarantees of support for any particular backing data store.
+
+See [Blockstores](./docs/blockstores.md) for more details.
+
+## Garbage Collection
+
+Over time, the datastore fills up with previously fetched blocks. Garbage collection frees this disk space; it must be triggered manually, and the trigger can be automated with a cron job.
+
+By default, the API route to trigger GC is `http://$RAINBOW_CTL_LISTEN_ADDRESS/mgr/gc`. The `BytesToFree` parameter must be passed to specify the upper limit on how much disk space should be cleared. Setting this parameter to a very high value will GC the entire datastore.
+
+Example cURL command to run GC:
+
+    curl -v --data '{"BytesToFree": 1099511627776}' http://127.0.0.1:8091/mgr/gc
+
 ## Deployment
 
 An ansible role to deploy Rainbow is available within the ipfs.ipfs collection in Ansible Galaxy (https://github.com/ipfs-shipyard/ansible). It includes a systemd service unit file.
 
diff --git a/docs/blockstores.md b/docs/blockstores.md
new file mode 100644
index 0000000..577d28e
--- /dev/null
+++ b/docs/blockstores.md
@@ -0,0 +1,33 @@
+# Rainbow Blockstores
+
+`rainbow` ships with a number of possible backing block storage options for caching data locally.
+Because `rainbow`, as a gateway-only IPFS implementation, is not designed for long-term data storage, there are no
+long-term guarantees of support for any particular backing blockstore.
+
+`rainbow` currently ships with the following blockstores:
+
+- [FlatFS](#flatfs)
+- [Badger](#badger)
+
+Note: `rainbow` exposes minimal configurability for each blockstore. If you find through experimentation that tuning
+some parameters would be a big benefit to you, file an issue/PR to discuss changing a blockstore's parameters or
+exposing more configurability.
+
+## FlatFS
+
+FlatFS is a fairly simple blockstore that puts each block into a separate file on disk. Due to its heavy use of the
+filesystem (i.e. not just how bytes are stored on disk, but also the file and directory structure), there are various
+optimizations to be had in the choice of filesystem and disk type. For example, choosing a filesystem that allows
+keeping file metadata on a fast SSD while the actual data lives on a slower disk can make many lookups cheaper.
+
+## Badger
+
+`rainbow` ships with [Badger-v4](https://github.com/dgraph-io/badger).
+The main reasons to choose Badger over FlatFS are:
+- It uses far fewer file descriptors and disk operations
+- It can compress data on disk
+- Generally faster reads and writes
+- Native bloom filters
+
+The main difficulty with Badger is that its internal garbage collection (not `rainbow`'s) depends on the workload,
+which makes it difficult to judge ahead of time how much storage capacity you will need.
\ No newline at end of file
diff --git a/gc.go b/gc.go
index 84661b6..8c5a21f 100644
--- a/gc.go
+++ b/gc.go
@@ -2,7 +2,6 @@ package main
 
 import (
 	"context"
 
-	badger4 "github.com/ipfs/go-ds-badger4"
 )
 
diff --git a/go.mod b/go.mod
index 5db2f2b..e649e0e 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	github.com/ipfs/go-cid v0.4.1
 	github.com/ipfs/go-datastore v0.6.0
 	github.com/ipfs/go-ds-badger4 v0.0.0-20231006150127-9137bcc6b981
+	github.com/ipfs/go-ds-flatfs v0.5.1
 	github.com/ipfs/go-ipfs-delay v0.0.1
 	github.com/ipfs/go-log/v2 v2.5.1
 	github.com/ipfs/go-metrics-interface v0.0.1
@@ -43,6 +44,7 @@ require (
 	github.com/Jorropo/jsync v1.0.1 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
+	github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
diff --git a/go.sum b/go.sum
index baf844f..f835251 100644
--- a/go.sum
+++ b/go.sum
@@ -47,6 +47,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
 github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM=
+github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -308,6 +310,7 @@ github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
 github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
 github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
 github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
+github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
 github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
 github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
 github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod
h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46U github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger4 v0.0.0-20231006150127-9137bcc6b981 h1:GOKV62VnjerKwO7mwOyeoArzlaVrDLyoC/YPNtxxGwg= github.com/ipfs/go-ds-badger4 v0.0.0-20231006150127-9137bcc6b981/go.mod h1:LUU2FbhNdmhAbJmMeoahVRbe4GsduAODSJHWJJh2Vo4= +github.com/ipfs/go-ds-flatfs v0.5.1 h1:ZCIO/kQOS/PSh3vcF1H6a8fkRGS7pOfwfPdx4n/KJH4= +github.com/ipfs/go-ds-flatfs v0.5.1/go.mod h1:RWTV7oZD/yZYBKdbVIFXTX2fdY2Tbvl94NsWqmoyAX4= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ipfs-blockstore v1.3.0 h1:m2EXaWgwTzAfsmt5UdJ7Is6l4gJcaM/A12XwJyvYvMM= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= @@ -339,8 +344,10 @@ github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= @@ -394,6 +401,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -535,6 +543,7 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= @@ -736,6 +745,7 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod 
h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -747,11 +757,13 @@ go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpK go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -1101,6 +1113,7 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/main.go b/main.go index 43582d3..069bcf5 100644 --- a/main.go +++ b/main.go @@ -139,7 +139,7 @@ Generate an identity seed and launch a gateway: Name: "inmem-block-cache", Value: 1 << 30, EnvVars: []string{"RAINBOW_INMEM_BLOCK_CACHE"}, - Usage: "Size of the in-memory block cache. 0 to disable (disables compression on disk too)", + Usage: "Size of the in-memory block cache (currently only used for badger). 0 to disable (disables compression on disk too)", }, &cli.Uint64Flag{ Name: "max-memory", @@ -176,6 +176,12 @@ Generate an identity seed and launch a gateway: EnvVars: []string{"RAINBOW_PEERING"}, Usage: "Multiaddresses of peers to stay connected to (comma-separated)", }, + &cli.StringFlag{ + Name: "blockstore", + Value: "flatfs", + EnvVars: []string{"RAINBOW_BLOCKSTORE"}, + Usage: "Type of blockstore to use, such as flatfs or badger. See https://github.com/ipfs/rainbow/blockstore.md for more details", + }, } app.Commands = []*cli.Command{ @@ -261,6 +267,7 @@ share the same seed as long as the indexes are different. 
 	cfg := Config{
 		DataDir:                 ddir,
+		BlockstoreType:          cctx.String("blockstore"),
 		GatewayDomains:          getCommaSeparatedList(cctx.String("gateway-domains")),
 		SubdomainGatewayDomains: getCommaSeparatedList(cctx.String("subdomain-gateway-domains")),
 		ConnMgrLow:              cctx.Int("connmgr-low"),
diff --git a/setup.go b/setup.go
index 02c32c8..c3ecb0f 100644
--- a/setup.go
+++ b/setup.go
@@ -11,8 +11,8 @@ import (
 	"path/filepath"
 	"time"
 
-	badger "github.com/dgraph-io/badger/v4"
-	options "github.com/dgraph-io/badger/v4/options"
+	"github.com/dgraph-io/badger/v4"
+	"github.com/dgraph-io/badger/v4/options"
 	nopfs "github.com/ipfs-shipyard/nopfs"
 	nopfsipfs "github.com/ipfs-shipyard/nopfs/ipfs"
 	bsclient "github.com/ipfs/boxo/bitswap/client"
@@ -30,6 +30,7 @@ import (
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
 	badger4 "github.com/ipfs/go-ds-badger4"
+	flatfs "github.com/ipfs/go-ds-flatfs"
 	delay "github.com/ipfs/go-ipfs-delay"
 	metri "github.com/ipfs/go-metrics-interface"
 	mprome "github.com/ipfs/go-metrics-prometheus"
@@ -80,7 +81,8 @@ type Node struct {
 }
 
 type Config struct {
-	DataDir string
+	DataDir        string
+	BlockstoreType string
 
 	ListenAddrs   []string
 	AnnounceAddrs []string
@@ -362,52 +364,59 @@ func Setup(ctx context.Context, cfg Config, key crypto.PrivKey, dnsCache *cached
 }
 
 func setupDatastore(cfg Config) (datastore.Batching, error) {
-	badgerOpts := badger.DefaultOptions("")
-	badgerOpts.CompactL0OnClose = false
-	// ValueThreshold: defaults to 1MB! For us that means everything goes
-	// into the LSM tree and that means more stuff in memory. We only
-	// put very small things on the LSM tree by default (i.e. a single
-	// CID).
-	badgerOpts.ValueThreshold = 256
-
-	// BlockCacheSize: instead of using blockstore, we cache things
-	// here. This only makes sense if using compression, according to
-	// docs.
-	badgerOpts.BlockCacheSize = cfg.InMemBlockCache // default 1 GiB.
-
-	// Compression: default. Trades reading less from disk for using more
-	// CPU. Given gateways are usually IO bound, I think we can make this
-	// trade.
-	if badgerOpts.BlockCacheSize == 0 {
-		badgerOpts.Compression = options.None
-	} else {
-		badgerOpts.Compression = options.Snappy
-	}
+	switch cfg.BlockstoreType {
+	case "flatfs":
+		return flatfs.CreateOrOpen(filepath.Join(cfg.DataDir, "flatfs"), flatfs.NextToLast(3), false)
+	case "badger":
+		badgerOpts := badger.DefaultOptions("")
+		badgerOpts.CompactL0OnClose = false
+		// ValueThreshold: defaults to 1MB! For us that means everything goes
+		// into the LSM tree and that means more stuff in memory. We only
+		// put very small things on the LSM tree by default (i.e. a single
+		// CID).
+		badgerOpts.ValueThreshold = 256
+
+		// BlockCacheSize: instead of using blockstore, we cache things
+		// here. This only makes sense if using compression, according to
+		// docs.
+		badgerOpts.BlockCacheSize = cfg.InMemBlockCache // default 1 GiB.
+
+		// Compression: default. Trades reading less from disk for using more
+		// CPU. Given gateways are usually IO bound, I think we can make this
+		// trade.
+		if badgerOpts.BlockCacheSize == 0 {
+			badgerOpts.Compression = options.None
+		} else {
+			badgerOpts.Compression = options.Snappy
+		}
 
-	// If we write something twice, we do it with the same values so
-	// *shrugh*.
-	badgerOpts.DetectConflicts = false
-
-	// MemTableSize: Defaults to 64MiB which seems an ok amount to flush
-	// to disk from time to time.
-	badgerOpts.MemTableSize = 64 << 20
-	// NumMemtables: more means more memory, faster writes, but more to
-	// commit to disk if they get full. Default is 5.
-	badgerOpts.NumMemtables = 5
-
-	// IndexCacheSize: 0 means all in memory (default). All means indexes,
-	// bloom filters etc. Usually not huge amount of memory usage from
-	// this.
-	badgerOpts.IndexCacheSize = 0
-
-	opts := badger4.Options{
-		GcDiscardRatio: 0.3,
-		GcInterval:     20 * time.Minute,
-		GcSleep:        10 * time.Second,
-		Options:        badgerOpts,
-	}
+		// If we write something twice, we do it with the same values so
+		// *shrugh*.
+		badgerOpts.DetectConflicts = false
+
+		// MemTableSize: Defaults to 64MiB which seems an ok amount to flush
+		// to disk from time to time.
+		badgerOpts.MemTableSize = 64 << 20
+		// NumMemtables: more means more memory, faster writes, but more to
+		// commit to disk if they get full. Default is 5.
+		badgerOpts.NumMemtables = 5
+
+		// IndexCacheSize: 0 means all in memory (default). All means indexes,
+		// bloom filters etc. Usually not huge amount of memory usage from
+		// this.
+		badgerOpts.IndexCacheSize = 0
+
+		opts := badger4.Options{
+			GcDiscardRatio: 0.3,
+			GcInterval:     20 * time.Minute,
+			GcSleep:        10 * time.Second,
+			Options:        badgerOpts,
+		}
 
-	return badger4.NewDatastore(filepath.Join(cfg.DataDir, "badger4"), &opts)
+		return badger4.NewDatastore(filepath.Join(cfg.DataDir, "badger4"), &opts)
+	default:
+		return nil, fmt.Errorf("unsupported blockstore type: %s", cfg.BlockstoreType)
+	}
 }
 
 type bundledDHT struct {
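For context, here is a minimal standalone sketch (not part of the patch) of what the `flatfs` case in `setupDatastore` sets up: it opens the same kind of sharded FlatFS datastore using the `flatfs.CreateOrOpen` and `flatfs.NextToLast(3)` calls shown in the diff and writes one dummy entry. The data directory and key are hypothetical, and rainbow itself wraps this datastore in a blockstore rather than writing to it directly.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	ds "github.com/ipfs/go-datastore"
	flatfs "github.com/ipfs/go-ds-flatfs"
)

func main() {
	// Hypothetical data directory; rainbow uses its own configured datadir here.
	dataDir := "/tmp/rainbow-data"
	if err := os.MkdirAll(dataDir, 0o755); err != nil {
		log.Fatal(err)
	}

	// Same call as the "flatfs" case in setupDatastore above: a sharded
	// flat-file datastore under <datadir>/flatfs, sharded on the three
	// next-to-last characters of each key, without per-write fsync.
	dstore, err := flatfs.CreateOrOpen(filepath.Join(dataDir, "flatfs"), flatfs.NextToLast(3), false)
	if err != nil {
		log.Fatal(err)
	}
	defer dstore.Close()

	// Write and read back one dummy entry. FlatFS keeps each entry in its
	// own file inside a shard subdirectory.
	key := ds.NewKey("EXAMPLEBLOCKKEY")
	if err := dstore.Put(context.Background(), key, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	val, err := dstore.Get(context.Background(), key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored %d bytes under %s\n", len(val), key)
}
```

At runtime the backend is selected with the new `--blockstore` flag or the `RAINBOW_BLOCKSTORE` environment variable (`flatfs` by default, `badger` as the alternative), matching the switch in `setupDatastore`.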