From bf8c70655916080f39970047531154a6c761159a Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:36:25 +0200 Subject: [PATCH 01/18] Add test case for new generic parser (#1803) --- protocol/parser/parser_test.go | 47 ++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/protocol/parser/parser_test.go b/protocol/parser/parser_test.go index 82d78c261f..e0e57371ff 100644 --- a/protocol/parser/parser_test.go +++ b/protocol/parser/parser_test.go @@ -601,6 +601,7 @@ func TestParseBlockFromReply(t *testing.T) { blockParser spectypes.BlockParser genericParsers []spectypes.GenericParser expected int64 + expectedError string }{ { name: "generic_parser_happy_flow_default_value", @@ -718,6 +719,47 @@ func TestParseBlockFromReply(t *testing.T) { }, expected: spectypes.LATEST_BLOCK, }, + { + name: "generic_parser_parse_from_result_happy_flow", + rpcInput: &RPCInputTest{ + Result: []byte(` + { + "foo": { + "bar": 123 + } + } + `), + }, + genericParsers: []spectypes.GenericParser{ + { + ParsePath: ".result.foo.bar", + Value: "123", + ParseType: spectypes.PARSER_TYPE_RESULT, + }, + }, + expected: 123, + }, + { + name: "generic_parser_parse_from_result_error", + rpcInput: &RPCInputTest{ + Result: []byte(` + { + "foo": { + "bar": 123 + } + } + `), + }, + genericParsers: []spectypes.GenericParser{ + { + ParsePath: ".result.foo.bar", + Value: "321", + ParseType: spectypes.PARSER_TYPE_RESULT, + }, + }, + expected: 123, + expectedError: "expected 321, received 123", + }, } for _, test := range tests { @@ -725,6 +767,11 @@ func TestParseBlockFromReply(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() parsedInput := ParseBlockFromReply(test.rpcInput, test.blockParser, test.genericParsers) + if test.expectedError != "" { + require.Equal(t, test.expectedError, parsedInput.GetParserError()) + } else { + require.Empty(t, parsedInput.GetParserError()) + } require.Equal(t, test.expected, parsedInput.GetBlock()) }) } From 13835ed8c46fab5443bb43c98c39fb76172985d4 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:37:25 +0200 Subject: [PATCH 02/18] fix: PRT - Avalance and Starknet fixes (#1796) * Fix the chain router for avalanche * Fix ethereum GET_BLOCKNUM * Fix avalanche spec * More fixes to the chain fetcher * Updated the starknet example * Small fix * Fix jsonrpc id in e2e proxy * Fixed and added some tests for internal paths * Ethereum fixes * Revert "Ethereum fixes" This reverts commit 17f9f6dfac8d60879f487887381a1130f833dcc7. * Revert "Fix jsonrpc id in e2e proxy" This reverts commit 796a4d519f3c6b7063a1e7eecb3d894b5d8c8571. * Revert "Fix ethereum GET_BLOCKNUM" This reverts commit 549cd833c996aa95788b97e042d45175e9048158. 
* Extracted to function * Small fix * Fix the proxy websocket handler * Fix lint --- .../avalanch_internal_paths_example.yml | 3 +- config/provider_examples/strk_example.yml | 8 +- cookbook/specs/avalanche.json | 12 +- protocol/chainlib/base_chain_parser.go | 46 +++- protocol/chainlib/chain_fetcher.go | 35 ++- protocol/chainlib/chain_router.go | 69 +++--- protocol/chainlib/chain_router_test.go | 226 ++++++++++++++++-- protocol/chainlib/chainlib.go | 3 +- protocol/chainlib/jsonRPC.go | 2 +- protocol/rpcprovider/rpcprovider_server.go | 2 +- testutil/e2e/proxy/proxy.go | 47 +++- utils/lavaslices/slices.go | 8 - utils/maps/maps.go | 16 ++ 13 files changed, 392 insertions(+), 85 deletions(-) diff --git a/config/provider_examples/avalanch_internal_paths_example.yml b/config/provider_examples/avalanch_internal_paths_example.yml index bf69abb6ad..523c1e19fa 100644 --- a/config/provider_examples/avalanch_internal_paths_example.yml +++ b/config/provider_examples/avalanch_internal_paths_example.yml @@ -2,7 +2,8 @@ endpoints: - api-interface: jsonrpc chain-id: AVAX - network-address: 127.0.0.1:2221 + network-address: + address: 127.0.0.1:2221 node-urls: - url: ws://127.0.0.1:3333/C/rpc/ws internal-path: "/C/rpc" # c chain like specified in the spec diff --git a/config/provider_examples/strk_example.yml b/config/provider_examples/strk_example.yml index 111bfaac07..257bacb6b8 100644 --- a/config/provider_examples/strk_example.yml +++ b/config/provider_examples/strk_example.yml @@ -5,9 +5,11 @@ endpoints: address: "127.0.0.1:2220" node-urls: - url: /ws - internal-path: "" + internal-path: "/ws" - url: /ws/rpc/v0_6 - internal-path: "/rpc/v0_6" + internal-path: "/ws/rpc/v0_6" + - url: /ws/rpc/v0_7 + internal-path: "/ws/rpc/v0_7" - url: internal-path: "" @@ -15,3 +17,5 @@ endpoints: internal-path: "/rpc/v0_5" - url: /rpc/v0_6 internal-path: "/rpc/v0_6" + - url: /rpc/v0_7 + internal-path: "/rpc/v0_7" diff --git a/cookbook/specs/avalanche.json b/cookbook/specs/avalanche.json index a7acedc3d0..3633cf938f 100644 --- a/cookbook/specs/avalanche.json +++ b/cookbook/specs/avalanche.json @@ -11,15 +11,15 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 4, "blocks_in_finalization_proof": 3, - "average_block_time": 2500, - "allowed_block_lag_for_qos_sync": 4, + "average_block_time": 2000, + "allowed_block_lag_for_qos_sync": 5, "imports": [ "ETH1" ], "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "50000000" }, "api_collections": [ { @@ -735,12 +735,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 4, "blocks_in_finalization_proof": 3, - "average_block_time": 2500, - "allowed_block_lag_for_qos_sync": 4, + "average_block_time": 2000, + "allowed_block_lag_for_qos_sync": 5, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "50000000" }, "api_collections": [ { diff --git a/protocol/chainlib/base_chain_parser.go b/protocol/chainlib/base_chain_parser.go index 487d545aba..fa730cde48 100644 --- a/protocol/chainlib/base_chain_parser.go +++ b/protocol/chainlib/base_chain_parser.go @@ -12,6 +12,7 @@ import ( "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/utils" "github.com/lavanet/lava/v4/utils/lavaslices" + "github.com/lavanet/lava/v4/utils/maps" epochstorage "github.com/lavanet/lava/v4/x/epochstorage/types" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" @@ -22,8 +23,16 @@ type PolicyInf interface { 
GetSupportedExtensions(specID string) (extensions []epochstorage.EndpointService, err error) } +type InternalPath struct { + Path string + Enabled bool + ApiInterface string + ConnectionType string + Addon string +} + type BaseChainParser struct { - internalPaths map[string]struct{} + internalPaths map[string]InternalPath taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer spec spectypes.Spec rwLock sync.RWMutex @@ -231,7 +240,7 @@ func (bcp *BaseChainParser) GetVerifications(supported []string, internalPath st return retVerifications, nil } -func (bcp *BaseChainParser) Construct(spec spectypes.Spec, internalPaths map[string]struct{}, taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, +func (bcp *BaseChainParser) Construct(spec spectypes.Spec, internalPaths map[string]InternalPath, taggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, serverApis map[ApiKey]ApiContainer, apiCollections map[CollectionKey]*spectypes.ApiCollection, headers map[ApiKey]*spectypes.Header, verifications map[VerificationKey]map[string][]VerificationContainer, ) { @@ -280,7 +289,16 @@ func (bcp *BaseChainParser) IsTagInCollection(tag spectypes.FUNCTION_TAG, collec func (bcp *BaseChainParser) GetAllInternalPaths() []string { bcp.rwLock.RLock() defer bcp.rwLock.RUnlock() - return lavaslices.KeysSlice(bcp.internalPaths) + return lavaslices.Map(maps.ValuesSlice(bcp.internalPaths), func(internalPath InternalPath) string { + return internalPath.Path + }) +} + +func (bcp *BaseChainParser) IsInternalPathEnabled(internalPath string, apiInterface string, addon string) bool { + bcp.rwLock.RLock() + defer bcp.rwLock.RUnlock() + internalPathObj, ok := bcp.internalPaths[internalPath] + return ok && internalPathObj.Enabled && internalPathObj.ApiInterface == apiInterface && internalPathObj.Addon == addon } func (bcp *BaseChainParser) ExtensionParsing(addon string, parsedMessageArg *baseChainMessageContainer, extensionInfo extensionslib.ExtensionInfo) { @@ -370,8 +388,18 @@ func (apip *BaseChainParser) getApiCollection(connectionType, internalPath, addo return api, nil } -func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths map[string]struct{}, retServerApis map[ApiKey]ApiContainer, retTaggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, retApiCollections map[CollectionKey]*spectypes.ApiCollection, retHeaders map[ApiKey]*spectypes.Header, retVerifications map[VerificationKey]map[string][]VerificationContainer) { - retInternalPaths = map[string]struct{}{} +func getServiceApis( + spec spectypes.Spec, + rpcInterface string, +) ( + retInternalPaths map[string]InternalPath, + retServerApis map[ApiKey]ApiContainer, + retTaggedApis map[spectypes.FUNCTION_TAG]TaggedContainer, + retApiCollections map[CollectionKey]*spectypes.ApiCollection, + retHeaders map[ApiKey]*spectypes.Header, + retVerifications map[VerificationKey]map[string][]VerificationContainer, +) { + retInternalPaths = map[string]InternalPath{} serverApis := map[ApiKey]ApiContainer{} taggedApis := map[spectypes.FUNCTION_TAG]TaggedContainer{} headers := map[ApiKey]*spectypes.Header{} @@ -392,7 +420,13 @@ func getServiceApis(spec spectypes.Spec, rpcInterface string) (retInternalPaths } // add as a valid internal path - retInternalPaths[apiCollection.CollectionData.InternalPath] = struct{}{} + retInternalPaths[apiCollection.CollectionData.InternalPath] = InternalPath{ + Path: apiCollection.CollectionData.InternalPath, + Enabled: apiCollection.Enabled, + ApiInterface: apiCollection.CollectionData.ApiInterface, + ConnectionType: 
apiCollection.CollectionData.Type, + Addon: apiCollection.CollectionData.AddOn, + } for _, parsing := range apiCollection.ParseDirectives { taggedApis[parsing.FunctionTag] = TaggedContainer{ diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index c9fc7f255a..920cab724b 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -121,6 +121,26 @@ func (cf *ChainFetcher) populateCache(relayData *pairingtypes.RelayPrivateData, } } +func getExtensionsForVerification(verification VerificationContainer, chainParser ChainParser) []string { + extensions := []string{verification.Extension} + + collectionKey := CollectionKey{ + InternalPath: verification.InternalPath, + Addon: verification.Addon, + ConnectionType: verification.ConnectionType, + } + + if chainParser.IsTagInCollection(spectypes.FUNCTION_TAG_SUBSCRIBE, collectionKey) { + if verification.Extension == "" { + extensions = []string{WebSocketExtension} + } else { + extensions = append(extensions, WebSocketExtension) + } + } + + return extensions +} + func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationContainer, latestBlock uint64) error { parsing := &verification.ParseDirective @@ -173,12 +193,21 @@ func (cf *ChainFetcher) Verify(ctx context.Context, verification VerificationCon return utils.LavaFormatError("[-] verify failed creating chainMessage", err, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) } - reply, _, _, proxyUrl, chainId, err := cf.chainRouter.SendNodeMsg(ctx, nil, chainMessage, []string{verification.Extension}) + extensions := getExtensionsForVerification(verification, cf.chainParser) + + reply, _, _, proxyUrl, chainId, err := cf.chainRouter.SendNodeMsg(ctx, nil, chainMessage, extensions) if err != nil { - return utils.LavaFormatWarning("[-] verify failed sending chainMessage", err, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) + return utils.LavaFormatWarning("[-] verify failed sending chainMessage", err, + utils.LogAttr("chainID", cf.endpoint.ChainID), + utils.LogAttr("APIInterface", cf.endpoint.ApiInterface), + utils.LogAttr("extensions", extensions), + ) } if reply == nil || reply.RelayReply == nil { - return utils.LavaFormatWarning("[-] verify failed sending chainMessage, reply or reply.RelayReply are nil", nil, []utils.Attribute{{Key: "chainID", Value: cf.endpoint.ChainID}, {Key: "APIInterface", Value: cf.endpoint.ApiInterface}}...) 
+ return utils.LavaFormatWarning("[-] verify failed sending chainMessage, reply or reply.RelayReply are nil", nil, + utils.LogAttr("chainID", cf.endpoint.ChainID), + utils.LogAttr("APIInterface", cf.endpoint.ApiInterface), + ) } parserInput, err := FormatResponseForParsing(reply.RelayReply, chainMessage) diff --git a/protocol/chainlib/chain_router.go b/protocol/chainlib/chain_router.go index cbad7305e6..7a00cf5b94 100644 --- a/protocol/chainlib/chain_router.go +++ b/protocol/chainlib/chain_router.go @@ -80,9 +80,10 @@ func (cri *chainRouterImpl) GetChainProxySupporting(ctx context.Context, addon s ) } -func (cri chainRouterImpl) ExtensionsSupported(extensions []string) bool { - routerKey := lavasession.NewRouterKey(extensions).String() - _, ok := cri.chainProxyRouter[routerKey] +func (cri chainRouterImpl) ExtensionsSupported(internalPath string, extensions []string) bool { + routerKey := lavasession.NewRouterKey(extensions) + routerKey.ApplyInternalPath(internalPath) + _, ok := cri.chainProxyRouter[routerKey.String()] return ok } @@ -111,39 +112,47 @@ func (cri *chainRouterImpl) autoGenerateMissingInternalPaths(isWs bool, nodeUrl nodeUrl.InternalPath = internalPath // add internal path to the nodeUrl nodeUrl.Url = baseUrl + internalPath routerKey.ApplyInternalPath(internalPath) - if isWs { - addons, _, err := chainParser.SeparateAddonsExtensions(nodeUrl.Addons) - if err != nil { - return err - } - lookForSubscriptionTag := func() bool { - for _, connectionType := range []string{"POST", ""} { - if len(addons) == 0 { - addons = append(addons, "") + addons, _, err := chainParser.SeparateAddonsExtensions(nodeUrl.Addons) + if err != nil { + return err + } + + subscriptionTagFound := func() bool { + for _, connectionType := range []string{"POST", ""} { + if len(addons) == 0 { + addons = append(addons, "") + } + + for _, addon := range addons { + // check subscription exists, we only care for subscription API's because otherwise we use http anyway. + collectionKey := CollectionKey{ + InternalPath: internalPath, + Addon: addon, + ConnectionType: connectionType, } - for _, addon := range addons { - // check subscription exists, we only care for subscription API's because otherwise we use http anyway. 
- collectionKey := CollectionKey{ - InternalPath: internalPath, - Addon: addon, - ConnectionType: connectionType, - } - - if chainParser.IsTagInCollection(spectypes.FUNCTION_TAG_SUBSCRIBE, collectionKey) { - return true - } + if chainParser.IsTagInCollection(spectypes.FUNCTION_TAG_SUBSCRIBE, collectionKey) { + return true } } - return false } + return false + }() - if !lookForSubscriptionTag() { - continue - } + if isWs && !subscriptionTagFound { + // this is ws, don't auto generate http paths + continue + } else if !isWs && subscriptionTagFound { + // this is http, don't auto generate ws paths + continue } + utils.LavaFormatDebug("auto generated internal path", + utils.LogAttr("nodeUrl", nodeUrl.Url), + utils.LogAttr("internalPath", internalPath), + utils.LogAttr("routerKey", routerKey.String()), + ) cri.setRouterKeyInBatch(nodeUrl, returnedBatch, routerKey, rpcProviderEndpoint, false) // will not override existing entries } @@ -195,9 +204,7 @@ func (cri *chainRouterImpl) BatchNodeUrlsByServices(rpcProviderEndpoint lavasess } } - // check if batch has http configured, if not, add a websocket one - // prefer one without internal path - if !httpRootRouteSet { + if !httpRootRouteSet && chainParser.IsInternalPathEnabled("", rpcProviderEndpoint.ApiInterface, "") { return nil, utils.LavaFormatError("HTTP/HTTPS is mandatory. It is recommended to configure both HTTP/HTTP and WS/WSS.", nil, utils.LogAttr("nodeUrls", rpcProviderEndpoint.NodeUrls)) } @@ -329,6 +336,8 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase } } + utils.LavaFormatDebug("router keys", utils.LogAttr("chainProxyRouter", chainProxyRouter)) + // make sure all chainProxyRouter entries have one without a method routing for routerKey, chainRouterEntries := range chainProxyRouter { // get the last entry, if it has methods routed, we need to error out diff --git a/protocol/chainlib/chain_router_test.go b/protocol/chainlib/chain_router_test.go index d7e2ca6cbe..aefdc84b1c 100644 --- a/protocol/chainlib/chain_router_test.go +++ b/protocol/chainlib/chain_router_test.go @@ -1220,7 +1220,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { for _, apiInterface := range apiInterfaces { playBook = append(playBook, []play{ { - name: "No internal paths in spec - single http node url configured", + name: "no_internal_paths_in_spec__single_http_node_url_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1244,7 +1244,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "No internal paths in spec - multiple http node urls configured", + name: "no_internal_paths_in_spec__multiple_http_node_urls_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1275,7 +1275,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "No internal paths in spec - single ws node url - should error", + name: "no_internal_paths_in_spec__single_ws_node_url__should_error", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1297,7 +1297,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { expectedError: true, }, { - name: "No internal paths in spec - both ws and http node urls", + name: "no_internal_paths_in_spec__both_ws_and_http_node_urls", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1326,7 +1326,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - single http node url configured - 
not covering all internal paths", + name: "with_internal_paths_in_spec__single_http_node_url_configured__not_covering_all_internal_paths", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1386,7 +1386,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - multiple http node urls configured - covering some internal paths", + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__covering_some_internal_paths", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1450,7 +1450,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - multiple http node urls configured - covering all internal paths", + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__covering_all_internal_paths", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1518,7 +1518,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - multiple http node urls configured - no root internal path - should error", + name: "with_internal_paths_in_spec__multiple_http_node_urls_configured__no_root_internal_path__should_error", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1578,7 +1578,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { expectedError: true, }, { - name: "With internal paths in spec - multiple http node urls and ws configured - covering all internal paths", + name: "with_internal_paths_in_spec__multiple_http_node_urls_and_ws_configured__covering_all_internal_paths", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1651,7 +1651,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - only root http and ws configured", + name: "with_internal_paths_in_spec__only_root_http_and_ws_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1716,7 +1716,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths in spec - only root http and ws and one out of two internal paths are configured", + name: "with_internal_paths_in_spec__only_root_http_and_ws_and_one_out_of_two_internal_paths_are_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1785,7 +1785,7 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, { - name: "With internal paths and ws internal paths in spec - only http is configured", + name: "with_internal_paths_and_ws_internal_paths_in_spec__only_http_is_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1842,13 +1842,12 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, }, expectedServicesToNodeUrls: map[string][]common.NodeUrl{ - "||": {{Url: "https://localhost:1234", InternalPath: ""}}, - "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, - "||internal-path:/WS|": {{Url: "https://localhost:1234/WS", InternalPath: "/WS"}}, + "||": {{Url: "https://localhost:1234", InternalPath: ""}}, + "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, }, }, { - name: "With internal paths and ws internal paths in spec - http and ws is configured", + name: "with_internal_paths_and_ws_internal_paths_in_spec__http_and_ws_is_configured", apiInterface: apiInterface, specApiCollections: 
[]*spectypes.ApiCollection{ { @@ -1911,13 +1910,12 @@ func TestChainRouterWithInternalPaths(t *testing.T) { expectedServicesToNodeUrls: map[string][]common.NodeUrl{ "||": {{Url: "https://localhost:1234", InternalPath: ""}}, "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, - "||internal-path:/WS|": {{Url: "https://localhost:1234/WS", InternalPath: "/WS"}}, "|websocket|": {{Url: "wss://localhost:5678", InternalPath: ""}}, "|websocket|internal-path:/WS|": {{Url: "wss://localhost:5678/WS", InternalPath: "/WS"}}, }, }, { - name: "With internal paths and multiple ws internal paths in spec - http and ws is configured", + name: "with_internal_paths_and_multiple_ws_internal_paths_in_spec__http_and_ws_is_configured", apiInterface: apiInterface, specApiCollections: []*spectypes.ApiCollection{ { @@ -1982,18 +1980,204 @@ func TestChainRouterWithInternalPaths(t *testing.T) { }, expectedServicesToNodeUrls: map[string][]common.NodeUrl{ "||": {{Url: "https://localhost:1234", InternalPath: ""}}, - "||internal-path:/X|": {{Url: "https://localhost:1234/X", InternalPath: "/X"}}, - "||internal-path:/WS|": {{Url: "https://localhost:1234/WS", InternalPath: "/WS"}}, "|websocket|": {{Url: "wss://localhost:1234", InternalPath: ""}}, "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234/WS", InternalPath: "/WS"}}, "|websocket|internal-path:/X|": {{Url: "wss://localhost:1234/X", InternalPath: "/X"}}, }, }, + { + name: "with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_http_only_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, + { + name: "with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_ws_only_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + 
InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, + { + name: "with_internal_paths_and_mixed_internal_paths_in_spec_and_root_is_disabled_http_and_ws_is_configured", + apiInterface: apiInterface, + specApiCollections: []*spectypes.ApiCollection{ + { + Enabled: false, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/X", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + }, + { + Enabled: true, + CollectionData: spectypes.CollectionData{ + ApiInterface: apiInterface, + InternalPath: "/WS", + Type: "POST", + AddOn: "", + }, + InheritanceApis: []*spectypes.CollectionData{ + { + ApiInterface: apiInterface, + InternalPath: "", + Type: "POST", + AddOn: "", + }, + }, + ParseDirectives: []*spectypes.ParseDirective{{ + FunctionTag: spectypes.FUNCTION_TAG_SUBSCRIBE, + }}, + }, + }, + nodeUrls: []common.NodeUrl{ + { + Url: "https://localhost:1234", + InternalPath: "/X", + }, + { + Url: "wss://localhost:1234", + InternalPath: "/WS", + }, + }, + expectedServicesToNodeUrls: map[string][]common.NodeUrl{ + "||internal-path:/X|": {{Url: "https://localhost:1234", InternalPath: "/X"}}, + "|websocket|internal-path:/WS|": {{Url: "wss://localhost:1234", InternalPath: "/WS"}}, + }, + }, }...) 
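// Illustrative note (a sketch, not part of the patch): the map keys asserted in the
// expectedServicesToNodeUrls cases above, such as "||", "||internal-path:/X|" and
// "|websocket|internal-path:/WS|", are the string form of lavasession router keys.
// Based on the chain_router.go changes in this patch, a key for a websocket node url
// served on an internal path would be derived roughly like this:
//
//	routerKey := lavasession.NewRouterKey([]string{"websocket"}) // empty slice for plain HTTP urls
//	routerKey.ApplyInternalPath("/WS")
//	key := routerKey.String() // expected to match "|websocket|internal-path:/WS|" above
//
// ExtensionsSupported(internalPath, extensions) performs the same derivation and looks the
// resulting key up in chainProxyRouter, which is why the expected services in these cases
// are keyed per internal path rather than per extension list alone.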
} for _, play := range playBook { - t.Run(play.apiInterface+" - "+play.name, func(t *testing.T) { + t.Run(play.apiInterface+"__"+play.name, func(t *testing.T) { chainParser, err := NewChainParser(play.apiInterface) require.NoError(t, err) diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 71e40aeead..49a90af1db 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -65,6 +65,7 @@ type ChainParser interface { GetParsingByTag(tag spectypes.FUNCTION_TAG) (parsing *spectypes.ParseDirective, apiCollection *spectypes.ApiCollection, existed bool) IsTagInCollection(tag spectypes.FUNCTION_TAG, collectionKey CollectionKey) bool GetAllInternalPaths() []string + IsInternalPathEnabled(internalPath string, apiInterface string, addon string) bool CraftMessage(parser *spectypes.ParseDirective, connectionType string, craftData *CraftData, metadata []pairingtypes.Metadata) (ChainMessageForSend, error) HandleHeaders(metadata []pairingtypes.Metadata, apiCollection *spectypes.ApiCollection, headersDirection spectypes.Header_HeaderType) (filtered []pairingtypes.Metadata, overwriteReqBlock string, ignoredMetadata []pairingtypes.Metadata) GetVerifications(supported []string, internalPath string, apiInterface string) ([]VerificationContainer, error) @@ -148,7 +149,7 @@ type ChainListener interface { type ChainRouter interface { SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend, extensions []string) (relayReply *RelayReplyWrapper, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, proxyUrl common.NodeUrl, chainId string, err error) // has to be thread safe, reuse code within ParseMsg as common functionality - ExtensionsSupported([]string) bool + ExtensionsSupported(internalPath string, extensions []string) bool } type ChainProxy interface { diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index c5aecbb5a8..a8c18e2db7 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -666,7 +666,7 @@ func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, // try to parse node error as json message rpcMessage = TryRecoverNodeErrorFromClientError(nodeErr) if rpcMessage == nil { - utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr)) + utils.LavaFormatDebug("got error from node", utils.LogAttr("GUID", ctx), utils.LogAttr("nodeErr", nodeErr), utils.LogAttr("nodeUrl", cp.NodeUrl.Url)) return nil, "", nil, nodeErr } } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 47ab9c2914..ab0e15559d 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -359,7 +359,7 @@ func (rpcps *RPCProviderServer) ValidateAddonsExtensions(addon string, extension if apiCollection.CollectionData.AddOn != addon { return utils.LavaFormatWarning("invalid addon in relay, parsed addon is not the same as requested", nil, utils.Attribute{Key: "requested addon", Value: addon[0]}, utils.Attribute{Key: "parsed addon", Value: chainMessage.GetApiCollection().CollectionData.AddOn}) } - if !rpcps.chainRouter.ExtensionsSupported(extensions) { + if !rpcps.chainRouter.ExtensionsSupported(apiCollection.CollectionData.InternalPath, extensions) { return utils.LavaFormatWarning("requested extensions are unsupported in chainRouter", nil, utils.Attribute{Key: "requested extensions", Value: extensions}) } return nil diff --git 
a/testutil/e2e/proxy/proxy.go b/testutil/e2e/proxy/proxy.go index 9eb9dde326..e5c73e3826 100644 --- a/testutil/e2e/proxy/proxy.go +++ b/testutil/e2e/proxy/proxy.go @@ -195,9 +195,46 @@ func startProxyProcess(process proxyProcess) { break } // Print the message to the console - log.Printf("Received: %s\n", msg) - // Write message back to browser - if err = conn.WriteMessage(msgType, msg); err != nil { + log.Printf("WS Received: %s\n", msg) + + var respmsg rpcclient.JsonrpcMessage + err = json.Unmarshal(msg, &respmsg) + if err != nil { + println(err.Error()) + continue + } + + replyMessage, err := rpcInterfaceMessages.ConvertJsonRPCMsg(&respmsg) + if err != nil { + println(err.Error()) + continue + } + + jStruct := &jsonStruct{} + err = json.Unmarshal(msg, jStruct) + if err != nil { + println(err.Error()) + continue + } + jStruct.ID = 0 + rawBodySNoID, _ := json.Marshal(jStruct) + + if val, ok := process.mock.requests[string(rawBodySNoID)]; ok && process.cache { + orderedJSON := idInsertedResponse(val, replyMessage) + println(dotsStr+process.port+dotsStr+process.id+" ::: Cached Response ::: ", orderedJSON) + cacheCount += 1 + + // Change Response + if fakeResponse { + val = fakeResult(val, "0xe000000000000000000") + // val = "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xe000000000000000000\"}" + println(process.port+" ::: Fake Response ::: ", val) + fakeCount += 1 + } + time.Sleep(500 * time.Millisecond) + conn.WriteMessage(msgType, []byte(orderedJSON)) + } else if err = conn.WriteMessage(msgType, msg); err != nil { + // Write message back to browser log.Println("Write error:", err) break } @@ -250,7 +287,7 @@ func fakeResult(val, fake string) string { return strings.Join(parts, ",") } -func idInstertedResponse(val string, replyMessage *rpcInterfaceMessages.JsonrpcMessage) string { +func idInsertedResponse(val string, replyMessage *rpcInterfaceMessages.JsonrpcMessage) string { // Extract ID from raw message respId, idErr := rpcInterfaceMessages.IdFromRawMessage(replyMessage.ID) if idErr != nil { @@ -320,7 +357,7 @@ func (p proxyProcess) LavaTestProxy(responseWriter http.ResponseWriter, request jStruct.ID = 0 rawBodySNoID, _ := json.Marshal(jStruct) if val, ok := mock.requests[string(rawBodySNoID)]; ok && p.cache { - orderedJSON := idInstertedResponse(val, replyMessage) + orderedJSON := idInsertedResponse(val, replyMessage) println(dotsStr+p.port+dotsStr+p.id+" ::: Cached Response ::: ", orderedJSON) cacheCount += 1 diff --git a/utils/lavaslices/slices.go b/utils/lavaslices/slices.go index 56b385d38e..4eb0c64b8e 100644 --- a/utils/lavaslices/slices.go +++ b/utils/lavaslices/slices.go @@ -276,14 +276,6 @@ func Map[T, V any](slice []T, filter func(T) V) []V { return values } -func KeysSlice[T comparable, V any](in map[T]V) []T { - keys := []T{} - for k := range in { - keys = append(keys, k) - } - return keys -} - func Filter[T any](slice []T, filter func(T) bool) []T { values := make([]T, 0) for _, v := range slice { diff --git a/utils/maps/maps.go b/utils/maps/maps.go index e6702b46fb..5486c70672 100644 --- a/utils/maps/maps.go +++ b/utils/maps/maps.go @@ -37,3 +37,19 @@ func GetMaxKey[T constraints.Ordered, V any](m map[T]V) T { } return maxKey } + +func KeysSlice[T comparable, V any](in map[T]V) []T { + keys := []T{} + for k := range in { + keys = append(keys, k) + } + return keys +} + +func ValuesSlice[T comparable, V any](in map[T]V) []V { + values := []V{} + for _, v := range in { + values = append(values, v) + } + return values +} From a7608c8d2f1f4852ec500f83429292a21bdb6c49 Mon Sep 17 
00:00:00 2001 From: Ran Mishael <106548467+ranlavanet@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:02:00 +0100 Subject: [PATCH 03/18] feat: increase target version to 4.1.3 (#1804) --- x/protocol/types/params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/protocol/types/params.go b/x/protocol/types/params.go index 27ce27395b..d923b38f5f 100644 --- a/x/protocol/types/params.go +++ b/x/protocol/types/params.go @@ -12,7 +12,7 @@ import ( var _ paramtypes.ParamSet = (*Params)(nil) const ( - TARGET_VERSION = "4.1.2" + TARGET_VERSION = "4.1.3" MIN_VERSION = "3.1.0" ) From 8e5ccb5459703c6a348d95ff05ce08dbbd9fb308 Mon Sep 17 00:00:00 2001 From: Ran Mishael <106548467+ranlavanet@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:25:36 +0100 Subject: [PATCH 04/18] feat: PRT - Add gas verification for celestia (#1801) Co-authored-by: Elad Gildnur <6321801+shleikes@users.noreply.github.com> --- cookbook/specs/celestia.json | 54 ++++++++++++++++ .../init_celestia_only_with_node.sh | 61 +++++++++++++++++++ 2 files changed, 115 insertions(+) create mode 100755 scripts/pre_setups/init_celestia_only_with_node.sh diff --git a/cookbook/specs/celestia.json b/cookbook/specs/celestia.json index c296b1d52f..e4a1323c4d 100644 --- a/cookbook/specs/celestia.json +++ b/cookbook/specs/celestia.json @@ -344,6 +344,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -711,6 +720,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1479,6 +1497,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1502,6 +1529,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1593,6 +1629,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ @@ -1616,6 +1661,15 @@ "inheritance_apis": [], "parse_directives": [], "verifications": [ + { + "name": "minimum-gas-price", + "values": [ + { + "expected_value": "0.002000000000000000utia", + "severity": "Warning" + } + ] + }, { "name": "chain-id", "values": [ diff --git a/scripts/pre_setups/init_celestia_only_with_node.sh b/scripts/pre_setups/init_celestia_only_with_node.sh new file mode 100755 index 0000000000..3bea92f70a --- /dev/null +++ b/scripts/pre_setups/init_celestia_only_with_node.sh @@ -0,0 +1,61 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source "$__dir"/../useful_commands.sh +. 
"${__dir}"/../vars/variables.sh + +LOGS_DIR=${__dir}/../../testutil/debugging/logs +mkdir -p $LOGS_DIR +rm $LOGS_DIR/*.log + +killall screen +screen -wipe + +echo "[Test Setup] installing all binaries" +make install-all + +echo "[Test Setup] setting up a new lava node" +screen -d -m -S node bash -c "./scripts/start_env_dev.sh" +screen -ls +echo "[Test Setup] sleeping 20 seconds for node to finish setup (if its not enough increase timeout)" +sleep 5 +wait_for_lava_node_to_start + +GASPRICE="0.00002ulava" +lavad tx gov submit-legacy-proposal spec-add ./cookbook/specs/ibc.json,./cookbook/specs/cosmoswasm.json,./cookbook/specs/tendermint.json,./cookbook/specs/cosmossdk.json,./cookbook/specs/cosmossdk_45.json,./cookbook/specs/cosmossdk_full.json,./cookbook/specs/ethermint.json,./cookbook/specs/ethereum.json,./cookbook/specs/cosmoshub.json,./cookbook/specs/lava.json,./cookbook/specs/osmosis.json,./cookbook/specs/fantom.json,./cookbook/specs/celo.json,./cookbook/specs/optimism.json,./cookbook/specs/arbitrum.json,./cookbook/specs/starknet.json,./cookbook/specs/aptos.json,./cookbook/specs/juno.json,./cookbook/specs/polygon.json,./cookbook/specs/evmos.json,./cookbook/specs/base.json,./cookbook/specs/canto.json,./cookbook/specs/sui.json,./cookbook/specs/solana.json,./cookbook/specs/bsc.json,./cookbook/specs/axelar.json,./cookbook/specs/avalanche.json,./cookbook/specs/fvm.json,./cookbook/specs/celestia.json --lava-dev-test -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE & +wait_next_block +wait_next_block +lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +sleep 4 + +# Plans proposal +lavad tx gov submit-legacy-proposal plans-add ./cookbook/plans/test_plans/default.json,./cookbook/plans/test_plans/temporary-add.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +wait_next_block +wait_next_block +lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +sleep 4 + +CLIENTSTAKE="500000000000ulava" +PROVIDERSTAKE="500000000000ulava" + +PROVIDER1_LISTENER="127.0.0.1:2220" + +lavad tx subscription buy DefaultPlan $(lavad keys show user1 -a) -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +wait_next_block +lavad tx pairing stake-provider "AXELAR" $PROVIDERSTAKE "$PROVIDER1_LISTENER,1" 1 $(operator_address) -y --from servicer1 --provider-moniker "dummyMoniker" --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +sleep_until_next_epoch + +screen -d -m -S provider1 bash -c "source ~/.bashrc; lavap rpcprovider \ +$PROVIDER1_LISTENER CELESTIATM rest '$CELESTIA_REST' \ +$PROVIDER1_LISTENER CELESTIATM tendermintrpc '$CELESTIA_RPC,$CELESTIA_RPC' \ +$PROVIDER1_LISTENER CELESTIATM grpc '$CELESTIA_GRPC' \ +$PROVIDER1_LISTENER CELESTIATM jsonrpc '$CELESTIA_JSONRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --chain-id lava --metrics-listen-address ":7776" 2>&1 | tee $LOGS_DIR/PROVIDER1.log" && sleep 0.25 + +screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer \ +127.0.0.1:3360 CELESTIATM rest 127.0.0.1:3361 CELESTIATM tendermintrpc 127.0.0.1:3362 CELESTIATM grpc \ +$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 + +echo "--- setting up screens done ---" +screen -ls \ No newline at end of file From 04db6767e8afcb05cdb2d4e0a862f429fcc8383e 
Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:26:40 +0200 Subject: [PATCH 05/18] fix: PRT - Update Celo spec (#1791) * Update celo spec * Updated min_stake_provider --- cookbook/specs/celo.json | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/cookbook/specs/celo.json b/cookbook/specs/celo.json index 02fc646a2a..2d57721d9a 100644 --- a/cookbook/specs/celo.json +++ b/cookbook/specs/celo.json @@ -19,7 +19,7 @@ "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -79,12 +79,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 1, "blocks_in_finalization_proof": 3, - "average_block_time": 5000, - "allowed_block_lag_for_qos_sync": 2, + "average_block_time": 1000, + "allowed_block_lag_for_qos_sync": 10, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -107,6 +107,18 @@ "expected_value": "0xaef3" } ] + }, + { + "name": "pruning", + "values": [ + { + "latest_distance": 86400 + }, + { + "extension": "archive", + "expected_value": "0x0" + } + ] } ] } From 48187b3ae4f95d2cdff31166bc268f1f2aa26707 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Sun, 1 Dec 2024 13:47:57 +0200 Subject: [PATCH 06/18] fix: PRT - Add generic parser for Near tx (#1809) * Add generic parser for near tx * Update the parsers --- cookbook/specs/near.json | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/cookbook/specs/near.json b/cookbook/specs/near.json index 05604a66ce..121b4b4e05 100644 --- a/cookbook/specs/near.json +++ b/cookbook/specs/near.json @@ -339,7 +339,17 @@ "hanging_api": true }, "extra_compute_units": 0, - "timeout_ms": 10000 + "timeout_ms": 10000, + "parsers": [ + { + "parse_path": ".params.tx_hash", + "parse_type": "BLOCK_HASH" + }, + { + "parse_path": ".params.[0]", + "parse_type": "BLOCK_HASH" + } + ] }, { "name": "EXPERIMENTAL_tx_status", @@ -357,7 +367,17 @@ "subscription": false, "stateful": 0 }, - "extra_compute_units": 0 + "extra_compute_units": 0, + "parsers": [ + { + "parse_path": ".params.tx_hash", + "parse_type": "BLOCK_HASH" + }, + { + "parse_path": ".params.[0]", + "parse_type": "BLOCK_HASH" + } + ] }, { "name": "EXPERIMENTAL_receipt", From 5a4300753572ad98b9da47a2e5749bdbd30f9fa0 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Sun, 1 Dec 2024 14:37:55 +0200 Subject: [PATCH 07/18] Update the starknet spec (#1807) --- cookbook/specs/starknet.json | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/cookbook/specs/starknet.json b/cookbook/specs/starknet.json index 651bf0ade8..9228e7b7ff 100644 --- a/cookbook/specs/starknet.json +++ b/cookbook/specs/starknet.json @@ -11,12 +11,12 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 6, "blocks_in_finalization_proof": 3, - "average_block_time": 12000, + "average_block_time": 30000, "allowed_block_lag_for_qos_sync": 2, "shares": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { @@ -36,7 +36,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -93,7 +93,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + 
"compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -156,7 +156,7 @@ "parser_func": "PARSE_DICTIONARY_OR_ORDERED", "default_value": "latest" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -445,7 +445,7 @@ ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": false, @@ -598,6 +598,17 @@ "expected_value": "*" } ] + }, + { + "name": "pruning", + "parse_directive": { + "function_tag": "GET_BLOCK_BY_NUM" + }, + "values": [ + { + "expected_value": "1" + } + ] } ] }, @@ -969,11 +980,11 @@ "data_reliability_enabled": true, "block_distance_for_finalized_data": 1, "blocks_in_finalization_proof": 3, - "average_block_time": 1800000, + "average_block_time": 32000, "allowed_block_lag_for_qos_sync": 1, "min_stake_provider": { "denom": "ulava", - "amount": "47500000000" + "amount": "5000000000" }, "api_collections": [ { From 8d6763d905e596602bc9a1142fe00e16c6544f67 Mon Sep 17 00:00:00 2001 From: Ran Mishael <106548467+ranlavanet@users.noreply.github.com> Date: Sun, 1 Dec 2024 14:39:18 +0100 Subject: [PATCH 08/18] feat: PRT - archive retry attempt on second relay regardless of node error (#1810) * feat: PRT - archive retry attempt on second relay regardless of node error * increase protocol version --- protocol/rpcconsumer/relay_state.go | 2 +- x/protocol/types/params.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/rpcconsumer/relay_state.go b/protocol/rpcconsumer/relay_state.go index 6dcbfc7751..37264979ea 100644 --- a/protocol/rpcconsumer/relay_state.go +++ b/protocol/rpcconsumer/relay_state.go @@ -150,7 +150,7 @@ func (rs *RelayState) SetProtocolMessage(protocolMessage chainlib.ProtocolMessag } func (rs *RelayState) upgradeToArchiveIfNeeded(numberOfRetriesLaunched int, numberOfNodeErrors uint64) { - if rs == nil || rs.archiveStatus == nil || numberOfNodeErrors == 0 { + if rs == nil || rs.archiveStatus == nil { return } hashes := rs.GetProtocolMessage().GetRequestedBlocksHashes() diff --git a/x/protocol/types/params.go b/x/protocol/types/params.go index d923b38f5f..2848f2aeb3 100644 --- a/x/protocol/types/params.go +++ b/x/protocol/types/params.go @@ -12,7 +12,7 @@ import ( var _ paramtypes.ParamSet = (*Params)(nil) const ( - TARGET_VERSION = "4.1.3" + TARGET_VERSION = "4.1.4" MIN_VERSION = "3.1.0" ) From ad245175e2ea2b845859dd781ee9c037811a485a Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Sun, 1 Dec 2024 15:40:31 +0200 Subject: [PATCH 09/18] fix: PRT - Fix near "chunk" and "block" generic parsers (#1811) * Fix near generic parser * Add the array params back --- cookbook/specs/near.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cookbook/specs/near.json b/cookbook/specs/near.json index 121b4b4e05..e0c46c2cf1 100644 --- a/cookbook/specs/near.json +++ b/cookbook/specs/near.json @@ -95,6 +95,10 @@ "rule": "=final || =optimistic", "parse_type": "DEFAULT_VALUE" }, + { + "parse_path": ".params.block_id", + "parse_type": "BLOCK_HASH" + }, { "parse_path": ".params.[0]", "parse_type": "BLOCK_HASH" @@ -141,6 +145,10 @@ }, "extra_compute_units": 0, "parsers": [ + { + "parse_path": ".params.chunk_id", + "parse_type": "BLOCK_HASH" + }, { "parse_path": ".params.[0]", "parse_type": "BLOCK_HASH" From fcfbef41259e59dbef1beae65c91978a2cd0d6f9 Mon Sep 17 00:00:00 2001 From: Omer <100387053+omerlavanet@users.noreply.github.com> Date: Sun, 1 Dec 2024 16:23:02 +0200 Subject: 
[PATCH 10/18] chore: refactor state query access (#1766) * refactor state query access * remove direct usage of client.Context to allow the rewiring of lava over lava * refactor rpcconsumer, allow creating a server with a function * lint * added custom lava transport --- .../pkg/state/lavavisor_state_tracker.go | 5 +- protocol/badgegenerator/tracker.go | 4 +- protocol/badgeserver/tracker.go | 6 +- protocol/rpcconsumer/custom_transport.go | 23 ++ protocol/rpcconsumer/rpcconsumer.go | 267 ++++++++++-------- protocol/rpcprovider/rpcprovider.go | 7 +- .../statetracker/consumer_state_tracker.go | 23 +- protocol/statetracker/events.go | 21 +- .../statetracker/provider_state_tracker.go | 31 +- protocol/statetracker/state_tracker.go | 9 +- .../statetracker/updaters/event_tracker.go | 22 +- .../updaters/provider_freeze_jail_updater.go | 26 +- .../provider_freeze_jail_updater_mocks.go | 4 + protocol/statetracker/updaters/state_query.go | 109 ++++--- 14 files changed, 320 insertions(+), 237 deletions(-) create mode 100644 protocol/rpcconsumer/custom_transport.go diff --git a/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go b/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go index b460d47b5e..fb1ca9d511 100644 --- a/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go +++ b/ecosystem/lavavisor/pkg/state/lavavisor_state_tracker.go @@ -24,7 +24,8 @@ type LavaVisorStateTracker struct { func NewLavaVisorStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher) (lvst *LavaVisorStateTracker, err error) { // validate chainId - status, err := clientCtx.Client.Status(ctx) + stateQuery := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + status, err := stateQuery.Status(ctx) if err != nil { return nil, utils.LavaFormatError("[Lavavisor] failed getting status", err) } @@ -36,7 +37,7 @@ func NewLavaVisorStateTracker(ctx context.Context, txFactory tx.Factory, clientC if err != nil { utils.LavaFormatFatal("chain is missing Lava spec, cant initialize lavavisor", err) } - lst := &LavaVisorStateTracker{stateQuery: updaters.NewStateQuery(ctx, clientCtx), averageBlockTime: time.Duration(specResponse.Spec.AverageBlockTime) * time.Millisecond} + lst := &LavaVisorStateTracker{stateQuery: stateQuery, averageBlockTime: time.Duration(specResponse.Spec.AverageBlockTime) * time.Millisecond} return lst, nil } diff --git a/protocol/badgegenerator/tracker.go b/protocol/badgegenerator/tracker.go index aa82c5f90c..c7822bae83 100644 --- a/protocol/badgegenerator/tracker.go +++ b/protocol/badgegenerator/tracker.go @@ -28,11 +28,11 @@ func NewBadgeStateTracker(ctx context.Context, clientCtx cosmosclient.Context, c emergencyTracker, blockNotFoundCallback := statetracker.NewEmergencyTracker(nil) txFactory := tx.Factory{} txFactory = txFactory.WithChainID(chainId) - stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + sq := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, sq, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } - sq := updaters.NewStateQuery(ctx, clientCtx) esq := updaters.NewEpochStateQuery(sq) pst := &BadgeStateTracker{StateTracker: stateTrackerBase, stateQuery: esq, ConsumerEmergencyTrackerInf: emergencyTracker} diff --git a/protocol/badgeserver/tracker.go b/protocol/badgeserver/tracker.go index 9b13ddee42..c54326a488 100644 --- 
a/protocol/badgeserver/tracker.go +++ b/protocol/badgeserver/tracker.go @@ -28,12 +28,12 @@ func NewBadgeStateTracker(ctx context.Context, clientCtx cosmosclient.Context, c emergencyTracker, blockNotFoundCallback := statetracker.NewEmergencyTracker(nil) txFactory := tx.Factory{} txFactory = txFactory.WithChainID(chainId) - stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := statetracker.NewStateTracker(ctx, txFactory, stateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } - stateTracker := updaters.NewStateQuery(ctx, clientCtx) - epochStateTracker := updaters.NewEpochStateQuery(stateTracker) + epochStateTracker := updaters.NewEpochStateQuery(stateQuery) badgeStateTracker := &BadgeStateTracker{ StateTracker: stateTrackerBase, diff --git a/protocol/rpcconsumer/custom_transport.go b/protocol/rpcconsumer/custom_transport.go new file mode 100644 index 0000000000..aef36b3396 --- /dev/null +++ b/protocol/rpcconsumer/custom_transport.go @@ -0,0 +1,23 @@ +package rpcconsumer + +import ( + "net/http" +) + +type CustomLavaTransport struct { + transport http.RoundTripper +} + +func NewCustomLavaTransport(httpTransport http.RoundTripper) *CustomLavaTransport { + return &CustomLavaTransport{transport: httpTransport} +} + +func (c *CustomLavaTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // Custom logic before the request + + // Delegate to the underlying RoundTripper (usually http.Transport) + resp, err := c.transport.RoundTrip(req) + + // Custom logic after the request + return resp, err +} diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 99dcb15a01..62448fe557 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -11,11 +11,14 @@ import ( "sync" "time" + rpchttp "github.com/cometbft/cometbft/rpc/client/http" + jsonrpcclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/config" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/lavanet/lava/v4/app" "github.com/lavanet/lava/v4/protocol/chainlib" "github.com/lavanet/lava/v4/protocol/common" @@ -151,7 +154,16 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt if err != nil { utils.LavaFormatFatal("failed creating RPCConsumer logs", err) } + consumerMetricsManager.SetVersion(upgrade.GetCurrentVersion().ConsumerVersion) + httpClient, err := jsonrpcclient.DefaultHTTPClient(options.clientCtx.NodeURI) + if err == nil { + httpClient.Transport = NewCustomLavaTransport(httpClient.Transport) + client, err := rpchttp.NewWithClient(options.clientCtx.NodeURI, "/websocket", httpClient) + if err == nil { + options.clientCtx = options.clientCtx.WithClient(client) + } + } // spawn up ConsumerStateTracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, options.clientCtx) @@ -161,6 +173,8 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt } rpcc.consumerStateTracker = consumerStateTracker + lavaChainFetcher.FetchLatestBlockNum(ctx) + lavaChainID := options.clientCtx.ChainID keyName, err := sigs.GetKeyName(options.clientCtx) if err != nil { @@ -213,119 +227,11 @@ func (rpcc 
*RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt for _, rpcEndpoint := range options.rpcEndpoints { go func(rpcEndpoint *lavasession.RPCEndpoint) error { defer wg.Done() - chainParser, err := chainlib.NewChainParser(rpcEndpoint.ApiInterface) - if err != nil { - err = utils.LavaFormatError("failed creating chain parser", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err - return err - } - chainID := rpcEndpoint.ChainID - // create policyUpdaters per chain - newPolicyUpdater := updaters.NewPolicyUpdater(chainID, consumerStateTracker, consumerAddr.String(), chainParser, *rpcEndpoint) - policyUpdater, ok, err := policyUpdaters.LoadOrStore(chainID, newPolicyUpdater) - if err != nil { - errCh <- err - return utils.LavaFormatError("failed loading or storing policy updater", err, utils.LogAttr("endpoint", rpcEndpoint)) - } - if ok { - err := policyUpdater.AddPolicySetter(chainParser, *rpcEndpoint) - if err != nil { - errCh <- err - return utils.LavaFormatError("failed adding policy setter", err) - } - } - - err = statetracker.RegisterForSpecUpdatesOrSetStaticSpec(ctx, chainParser, options.cmdFlags.StaticSpecPath, *rpcEndpoint, rpcc.consumerStateTracker) - if err != nil { - err = utils.LavaFormatError("failed registering for spec updates", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err - return err - } - - _, averageBlockTime, _, _ := chainParser.ChainBlockStats() - var optimizer *provideroptimizer.ProviderOptimizer - var consumerConsistency *ConsumerConsistency - var finalizationConsensus *finalizationconsensus.FinalizationConsensus - getOrCreateChainAssets := func() error { - // this is locked so we don't race optimizers creation - chainMutexes[chainID].Lock() - defer chainMutexes[chainID].Unlock() - var loaded bool - var err error - - baseLatency := common.AverageWorldLatency / 2 // we want performance to be half our timeout or better - - // Create / Use existing optimizer - newOptimizer := provideroptimizer.NewProviderOptimizer(options.strategy, averageBlockTime, baseLatency, options.maxConcurrentProviders, consumerOptimizerQoSClient, chainID) - optimizer, loaded, err = optimizers.LoadOrStore(chainID, newOptimizer) - if err != nil { - return utils.LavaFormatError("failed loading optimizer", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - - if !loaded { - // if this is a new optimizer, register it in the consumerOptimizerQoSClient - consumerOptimizerQoSClient.RegisterOptimizer(optimizer, chainID) - } - - // Create / Use existing ConsumerConsistency - newConsumerConsistency := NewConsumerConsistency(chainID) - consumerConsistency, _, err = consumerConsistencies.LoadOrStore(chainID, newConsumerConsistency) - if err != nil { - return utils.LavaFormatError("failed loading consumer consistency", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - - // Create / Use existing FinalizationConsensus - newFinalizationConsensus := finalizationconsensus.NewFinalizationConsensus(rpcEndpoint.ChainID) - finalizationConsensus, loaded, err = finalizationConsensuses.LoadOrStore(chainID, newFinalizationConsensus) - if err != nil { - return utils.LavaFormatError("failed loading finalization consensus", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) - } - if !loaded { // when creating new finalization consensus instance we need to register it to updates - consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) - } - return nil - } - err = getOrCreateChainAssets() - if err != nil { - errCh <- err 
- return err - } - - if finalizationConsensus == nil || optimizer == nil || consumerConsistency == nil { - err = utils.LavaFormatError("failed getting assets, found a nil", nil, utils.Attribute{Key: "endpoint", Value: rpcEndpoint.Key()}) - errCh <- err - return err - } - - // Create active subscription provider storage for each unique chain - activeSubscriptionProvidersStorage := lavasession.NewActiveSubscriptionProvidersStorage() - consumerSessionManager := lavasession.NewConsumerSessionManager(rpcEndpoint, optimizer, consumerMetricsManager, consumerReportsManager, consumerAddr.String(), activeSubscriptionProvidersStorage) - // Register For Updates - rpcc.consumerStateTracker.RegisterConsumerSessionManagerForPairingUpdates(ctx, consumerSessionManager, options.staticProvidersList) - - var relaysMonitor *metrics.RelaysMonitor - if options.cmdFlags.RelaysHealthEnableFlag { - relaysMonitor = metrics.NewRelaysMonitor(options.cmdFlags.RelaysHealthIntervalFlag, rpcEndpoint.ChainID, rpcEndpoint.ApiInterface) - relaysMonitorAggregator.RegisterRelaysMonitor(rpcEndpoint.String(), relaysMonitor) - } - - rpcConsumerServer := &RPCConsumerServer{} - - var consumerWsSubscriptionManager *chainlib.ConsumerWSSubscriptionManager - var specMethodType string - if rpcEndpoint.ApiInterface == spectypes.APIInterfaceJsonRPC { - specMethodType = http.MethodPost - } - consumerWsSubscriptionManager = chainlib.NewConsumerWSSubscriptionManager(consumerSessionManager, rpcConsumerServer, options.refererData, specMethodType, chainParser, activeSubscriptionProvidersStorage, consumerMetricsManager) - - utils.LavaFormatInfo("RPCConsumer Listening", utils.Attribute{Key: "endpoints", Value: rpcEndpoint.String()}) - err = rpcConsumerServer.ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, options.requiredResponses, privKey, lavaChainID, options.cache, rpcConsumerMetrics, consumerAddr, consumerConsistency, relaysMonitor, options.cmdFlags, options.stateShare, options.refererData, consumerReportsManager, consumerWsSubscriptionManager) - if err != nil { - err = utils.LavaFormatError("failed serving rpc requests", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) - errCh <- err - return err - } - return nil + _, err := rpcc.CreateConsumerEndpoint(ctx, rpcEndpoint, errCh, consumerAddr, consumerStateTracker, + policyUpdaters, optimizers, consumerConsistencies, finalizationConsensuses, chainMutexes, + options, privKey, lavaChainID, rpcConsumerMetrics, consumerReportsManager, consumerOptimizerQoSClient, + consumerMetricsManager, relaysMonitorAggregator) + return err }(rpcEndpoint) } @@ -361,6 +267,141 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt return nil } +func (rpcc *RPCConsumer) CreateConsumerEndpoint( + ctx context.Context, + rpcEndpoint *lavasession.RPCEndpoint, + errCh chan error, + consumerAddr sdk.AccAddress, + consumerStateTracker *statetracker.ConsumerStateTracker, + policyUpdaters *common.SafeSyncMap[string, *updaters.PolicyUpdater], + optimizers *common.SafeSyncMap[string, *provideroptimizer.ProviderOptimizer], + consumerConsistencies *common.SafeSyncMap[string, *ConsumerConsistency], + finalizationConsensuses *common.SafeSyncMap[string, *finalizationconsensus.FinalizationConsensus], + chainMutexes map[string]*sync.Mutex, + options *rpcConsumerStartOptions, + privKey *secp256k1.PrivateKey, + lavaChainID string, + rpcConsumerMetrics *metrics.RPCConsumerLogs, + consumerReportsManager 
*metrics.ConsumerReportsClient, + consumerOptimizerQoSClient *metrics.ConsumerOptimizerQoSClient, + consumerMetricsManager *metrics.ConsumerMetricsManager, + relaysMonitorAggregator *metrics.RelaysMonitorAggregator, +) (*RPCConsumerServer, error) { + chainParser, err := chainlib.NewChainParser(rpcEndpoint.ApiInterface) + if err != nil { + err = utils.LavaFormatError("failed creating chain parser", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + chainID := rpcEndpoint.ChainID + // create policyUpdaters per chain + newPolicyUpdater := updaters.NewPolicyUpdater(chainID, consumerStateTracker, consumerAddr.String(), chainParser, *rpcEndpoint) + policyUpdater, ok, err := policyUpdaters.LoadOrStore(chainID, newPolicyUpdater) + if err != nil { + errCh <- err + return nil, utils.LavaFormatError("failed loading or storing policy updater", err, utils.LogAttr("endpoint", rpcEndpoint)) + } + if ok { + err := policyUpdater.AddPolicySetter(chainParser, *rpcEndpoint) + if err != nil { + errCh <- err + return nil, utils.LavaFormatError("failed adding policy setter", err) + } + } + + err = statetracker.RegisterForSpecUpdatesOrSetStaticSpec(ctx, chainParser, options.cmdFlags.StaticSpecPath, *rpcEndpoint, rpcc.consumerStateTracker) + if err != nil { + err = utils.LavaFormatError("failed registering for spec updates", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + + _, averageBlockTime, _, _ := chainParser.ChainBlockStats() + var optimizer *provideroptimizer.ProviderOptimizer + var consumerConsistency *ConsumerConsistency + var finalizationConsensus *finalizationconsensus.FinalizationConsensus + getOrCreateChainAssets := func() error { + // this is locked so we don't race optimizers creation + chainMutexes[chainID].Lock() + defer chainMutexes[chainID].Unlock() + var loaded bool + var err error + + baseLatency := common.AverageWorldLatency / 2 // we want performance to be half our timeout or better + + // Create / Use existing optimizer + newOptimizer := provideroptimizer.NewProviderOptimizer(options.strategy, averageBlockTime, baseLatency, options.maxConcurrentProviders, consumerOptimizerQoSClient, chainID) + optimizer, loaded, err = optimizers.LoadOrStore(chainID, newOptimizer) + if err != nil { + return utils.LavaFormatError("failed loading optimizer", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + + if !loaded { + // if this is a new optimizer, register it in the consumerOptimizerQoSClient + consumerOptimizerQoSClient.RegisterOptimizer(optimizer, chainID) + } + + // Create / Use existing ConsumerConsistency + newConsumerConsistency := NewConsumerConsistency(chainID) + consumerConsistency, _, err = consumerConsistencies.LoadOrStore(chainID, newConsumerConsistency) + if err != nil { + return utils.LavaFormatError("failed loading consumer consistency", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + + // Create / Use existing FinalizationConsensus + newFinalizationConsensus := finalizationconsensus.NewFinalizationConsensus(rpcEndpoint.ChainID) + finalizationConsensus, loaded, err = finalizationConsensuses.LoadOrStore(chainID, newFinalizationConsensus) + if err != nil { + return utils.LavaFormatError("failed loading finalization consensus", err, utils.LogAttr("endpoint", rpcEndpoint.Key())) + } + if !loaded { // when creating new finalization consensus instance we need to register it to updates + consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) + } + 
return nil + } + err = getOrCreateChainAssets() + if err != nil { + errCh <- err + return nil, err + } + + if finalizationConsensus == nil || optimizer == nil || consumerConsistency == nil { + err = utils.LavaFormatError("failed getting assets, found a nil", nil, utils.Attribute{Key: "endpoint", Value: rpcEndpoint.Key()}) + errCh <- err + return nil, err + } + + // Create active subscription provider storage for each unique chain + activeSubscriptionProvidersStorage := lavasession.NewActiveSubscriptionProvidersStorage() + consumerSessionManager := lavasession.NewConsumerSessionManager(rpcEndpoint, optimizer, consumerMetricsManager, consumerReportsManager, consumerAddr.String(), activeSubscriptionProvidersStorage) + // Register For Updates + rpcc.consumerStateTracker.RegisterConsumerSessionManagerForPairingUpdates(ctx, consumerSessionManager, options.staticProvidersList) + + var relaysMonitor *metrics.RelaysMonitor + if options.cmdFlags.RelaysHealthEnableFlag { + relaysMonitor = metrics.NewRelaysMonitor(options.cmdFlags.RelaysHealthIntervalFlag, rpcEndpoint.ChainID, rpcEndpoint.ApiInterface) + relaysMonitorAggregator.RegisterRelaysMonitor(rpcEndpoint.String(), relaysMonitor) + } + + rpcConsumerServer := &RPCConsumerServer{} + + var consumerWsSubscriptionManager *chainlib.ConsumerWSSubscriptionManager + var specMethodType string + if rpcEndpoint.ApiInterface == spectypes.APIInterfaceJsonRPC { + specMethodType = http.MethodPost + } + consumerWsSubscriptionManager = chainlib.NewConsumerWSSubscriptionManager(consumerSessionManager, rpcConsumerServer, options.refererData, specMethodType, chainParser, activeSubscriptionProvidersStorage, consumerMetricsManager) + + utils.LavaFormatInfo("RPCConsumer Listening", utils.Attribute{Key: "endpoints", Value: rpcEndpoint.String()}) + err = rpcConsumerServer.ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, options.requiredResponses, privKey, lavaChainID, options.cache, rpcConsumerMetrics, consumerAddr, consumerConsistency, relaysMonitor, options.cmdFlags, options.stateShare, options.refererData, consumerReportsManager, consumerWsSubscriptionManager) + if err != nil { + err = utils.LavaFormatError("failed serving rpc requests", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) + errCh <- err + return nil, err + } + return rpcConsumerServer, nil +} + func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints []*lavasession.RPCEndpoint, err error) { err = viper_endpoints.UnmarshalKey(common.EndpointsConfigName, &endpoints) if err != nil { diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 1e83f7eea0..794b64f295 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -219,7 +219,7 @@ func (rpcp *RPCProvider) Start(options *rpcProviderStartOptions) (err error) { utils.LavaFormatInfo("RPCProvider pubkey: " + rpcp.addr.String()) - rpcp.createAndRegisterFreezeUpdatersByOptions(ctx, options.clientCtx, rpcp.addr.String()) + rpcp.createAndRegisterFreezeUpdatersByOptions(ctx, providerStateTracker.StateQuery.StateQuery, rpcp.addr.String()) utils.LavaFormatInfo("RPCProvider setting up endpoints", utils.Attribute{Key: "count", Value: strconv.Itoa(len(options.rpcProviderEndpoints))}) @@ -282,9 +282,8 @@ func (rpcp *RPCProvider) Start(options *rpcProviderStartOptions) (err error) { return nil } -func (rpcp *RPCProvider) createAndRegisterFreezeUpdatersByOptions(ctx context.Context, clientCtx 
client.Context, publicAddress string) { - queryClient := pairingtypes.NewQueryClient(clientCtx) - freezeJailUpdater := updaters.NewProviderFreezeJailUpdater(queryClient, publicAddress, rpcp.providerMetricsManager) +func (rpcp *RPCProvider) createAndRegisterFreezeUpdatersByOptions(ctx context.Context, stateQuery *updaters.StateQuery, publicAddress string) { + freezeJailUpdater := updaters.NewProviderFreezeJailUpdater(stateQuery, publicAddress, rpcp.providerMetricsManager) rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, freezeJailUpdater) } diff --git a/protocol/statetracker/consumer_state_tracker.go b/protocol/statetracker/consumer_state_tracker.go index 8ab2bd046e..ffa21cd6a7 100644 --- a/protocol/statetracker/consumer_state_tracker.go +++ b/protocol/statetracker/consumer_state_tracker.go @@ -25,7 +25,7 @@ type ConsumerTxSenderInf interface { // ConsumerStateTracker CSTis a class for tracking consumer data from the lava blockchain, such as epoch changes. // it allows also to query specific data form the blockchain and acts as a single place to send transactions type ConsumerStateTracker struct { - stateQuery *updaters.ConsumerStateQuery + StateQuery *updaters.ConsumerStateQuery ConsumerTxSenderInf *StateTracker ConsumerEmergencyTrackerInf @@ -34,7 +34,8 @@ type ConsumerStateTracker struct { func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, metrics *metrics.ConsumerMetricsManager, disableConflictTransactions bool) (ret *ConsumerStateTracker, err error) { emergencyTracker, blockNotFoundCallback := NewEmergencyTracker(metrics) - stateTrackerBase, err := NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewConsumerStateQuery(ctx, clientCtx) + stateTrackerBase, err := NewStateTracker(ctx, txFactory, stateQuery.StateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } @@ -44,7 +45,7 @@ func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } cst := &ConsumerStateTracker{ StateTracker: stateTrackerBase, - stateQuery: updaters.NewConsumerStateQuery(ctx, clientCtx), + StateQuery: stateQuery, ConsumerTxSenderInf: txSender, ConsumerEmergencyTrackerInf: emergencyTracker, disableConflictTransactions: disableConflictTransactions, @@ -56,7 +57,7 @@ func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCt func (cst *ConsumerStateTracker) RegisterConsumerSessionManagerForPairingUpdates(ctx context.Context, consumerSessionManager *lavasession.ConsumerSessionManager, staticProvidersList []*lavasession.RPCProviderEndpoint) { // register this CSM to get the updated pairing list when a new epoch starts - pairingUpdater := updaters.NewPairingUpdater(cst.stateQuery, consumerSessionManager.RPCEndpoint().ChainID) + pairingUpdater := updaters.NewPairingUpdater(cst.StateQuery, consumerSessionManager.RPCEndpoint().ChainID) pairingUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, pairingUpdater) pairingUpdater, ok := pairingUpdaterRaw.(*updaters.PairingUpdater) if !ok { @@ -81,7 +82,7 @@ func (cst *ConsumerStateTracker) RegisterConsumerSessionManagerForPairingUpdates } func (cst *ConsumerStateTracker) RegisterForPairingUpdates(ctx context.Context, pairingUpdatable updaters.PairingUpdatable, specId string) { - pairingUpdater := updaters.NewPairingUpdater(cst.stateQuery, specId) + pairingUpdater := updaters.NewPairingUpdater(cst.StateQuery, specId) pairingUpdaterRaw := 
cst.StateTracker.RegisterForUpdates(ctx, pairingUpdater) pairingUpdater, ok := pairingUpdaterRaw.(*updaters.PairingUpdater) if !ok { @@ -94,7 +95,7 @@ func (cst *ConsumerStateTracker) RegisterForPairingUpdates(ctx context.Context, } func (cst *ConsumerStateTracker) RegisterFinalizationConsensusForUpdates(ctx context.Context, finalizationConsensus *finalizationconsensus.FinalizationConsensus) { - finalizationConsensusUpdater := updaters.NewFinalizationConsensusUpdater(cst.stateQuery, finalizationConsensus.SpecId) + finalizationConsensusUpdater := updaters.NewFinalizationConsensusUpdater(cst.StateQuery, finalizationConsensus.SpecId) finalizationConsensusUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, finalizationConsensusUpdater) finalizationConsensusUpdater, ok := finalizationConsensusUpdaterRaw.(*updaters.FinalizationConsensusUpdater) if !ok { @@ -120,7 +121,7 @@ func (cst *ConsumerStateTracker) TxConflictDetection(ctx context.Context, finali func (cst *ConsumerStateTracker) RegisterForSpecUpdates(ctx context.Context, specUpdatable updaters.SpecUpdatable, endpoint lavasession.RPCEndpoint) error { // register for spec updates sets spec and updates when a spec has been modified - specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, cst.stateQuery, cst.EventTracker) + specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, cst.StateQuery, cst.EventTracker) specUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -130,11 +131,11 @@ func (cst *ConsumerStateTracker) RegisterForSpecUpdates(ctx context.Context, spe } func (cst *ConsumerStateTracker) GetConsumerPolicy(ctx context.Context, consumerAddress, chainID string) (*plantypes.Policy, error) { - return cst.stateQuery.GetEffectivePolicy(ctx, consumerAddress, chainID) + return cst.StateQuery.GetEffectivePolicy(ctx, consumerAddress, chainID) } func (cst *ConsumerStateTracker) RegisterForVersionUpdates(ctx context.Context, version *protocoltypes.Version, versionValidator updaters.VersionValidationInf) { - versionUpdater := updaters.NewVersionUpdater(cst.stateQuery, cst.EventTracker, version, versionValidator) + versionUpdater := updaters.NewVersionUpdater(cst.StateQuery, cst.EventTracker, version, versionValidator) versionUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, versionUpdater) versionUpdater, ok := versionUpdaterRaw.(*updaters.VersionUpdater) if !ok { @@ -145,7 +146,7 @@ func (cst *ConsumerStateTracker) RegisterForVersionUpdates(ctx context.Context, func (cst *ConsumerStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Context, downtimeParamsUpdatable updaters.DowntimeParamsUpdatable) error { // register for downtimeParams updates sets downtimeParams and updates when downtimeParams has been changed - downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(cst.stateQuery, cst.EventTracker) + downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(cst.StateQuery, cst.EventTracker) downtimeParamsUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, downtimeParamsUpdater) downtimeParamsUpdater, ok := downtimeParamsUpdaterRaw.(*updaters.DowntimeParamsUpdater) if !ok { @@ -156,5 +157,5 @@ func (cst *ConsumerStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Co } func (cst *ConsumerStateTracker) GetProtocolVersion(ctx context.Context) (*updaters.ProtocolVersionResponse, error) { - return cst.stateQuery.GetProtocolVersion(ctx) + return cst.StateQuery.GetProtocolVersion(ctx) } diff --git a/protocol/statetracker/events.go 
b/protocol/statetracker/events.go index 9b41f74842..1aa68f248a 100644 --- a/protocol/statetracker/events.go +++ b/protocol/statetracker/events.go @@ -65,11 +65,8 @@ func eventsLookup(ctx context.Context, clientCtx client.Context, blocks, fromBlo defer ticker.Stop() readEventsFromBlock := func(blockFrom int64, blockTo int64, hash string) { for block := blockFrom; block < blockTo; block++ { - brp, err := updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } - blockResults, err := brp.BlockResults(ctx, &block) + queryInst := updaters.NewStateQueryAccessInst(clientCtx) + blockResults, err := queryInst.BlockResults(ctx, &block) if err != nil { utils.LavaFormatError("invalid blockResults status", err) return @@ -275,14 +272,11 @@ func paymentsLookup(ctx context.Context, clientCtx client.Context, blockStart, b continue } utils.LavaFormatInfo("fetching block", utils.LogAttr("block", block)) - brp, err := updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } + queryInst := updaters.NewStateQueryAccessInst(clientCtx) var blockResults *coretypes.ResultBlockResults for retry := 0; retry < 3; retry++ { ctxWithTimeout, cancelContextWithTimeout := context.WithTimeout(ctx, time.Second*30) - blockResults, err = brp.BlockResults(ctxWithTimeout, &block) + blockResults, err = queryInst.BlockResults(ctxWithTimeout, &block) cancelContextWithTimeout() if err != nil { utils.LavaFormatWarning("@@@@ failed fetching block results will retry", err, utils.LogAttr("block_number", block)) @@ -660,10 +654,7 @@ func countTransactionsPerDay(ctx context.Context, clientCtx client.Context, bloc utils.LogAttr("starting_block", latestHeight-numberOfBlocksInADay), ) - tmClient, err := updaters.TryIntoTendermintRPC(clientCtx.Client) - if err != nil { - utils.LavaFormatFatal("invalid blockResults provider", err) - } + queryInst := updaters.NewStateQueryAccessInst(clientCtx) // i is days // j are blocks in that day // starting from current day and going backwards @@ -697,7 +688,7 @@ func countTransactionsPerDay(ctx context.Context, clientCtx client.Context, bloc defer wg.Done() ctxWithTimeout, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - blockResults, err := tmClient.BlockResults(ctxWithTimeout, &k) + blockResults, err := queryInst.BlockResults(ctxWithTimeout, &k) if err != nil { utils.LavaFormatError("invalid blockResults status", err) return diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 2dc0bbddcb..d6f5423b72 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -19,7 +19,7 @@ import ( // ProviderStateTracker PST is a class for tracking provider data from the lava blockchain, such as epoch changes. 
// it allows also to query specific data form the blockchain and acts as a single place to send transactions type ProviderStateTracker struct { - stateQuery *updaters.ProviderStateQuery + StateQuery *updaters.ProviderStateQuery txSender *ProviderTxSender *StateTracker *EmergencyTracker @@ -27,7 +27,8 @@ type ProviderStateTracker struct { func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, metrics *metrics.ProviderMetricsManager) (ret *ProviderStateTracker, err error) { emergencyTracker, blockNotFoundCallback := NewEmergencyTracker(metrics) - stateTrackerBase, err := NewStateTracker(ctx, txFactory, clientCtx, chainFetcher, blockNotFoundCallback) + stateQuery := updaters.NewProviderStateQuery(ctx, updaters.NewStateQueryAccessInst(clientCtx)) + stateTrackerBase, err := NewStateTracker(ctx, txFactory, stateQuery.StateQuery, chainFetcher, blockNotFoundCallback) if err != nil { return nil, err } @@ -37,7 +38,7 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } pst := &ProviderStateTracker{ StateTracker: stateTrackerBase, - stateQuery: updaters.NewProviderStateQuery(ctx, clientCtx), + StateQuery: stateQuery, txSender: txSender, EmergencyTracker: emergencyTracker, } @@ -49,7 +50,7 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable updaters.EpochUpdatable) { - epochUpdater := updaters.NewEpochUpdater(&pst.stateQuery.EpochStateQuery) + epochUpdater := updaters.NewEpochUpdater(&pst.StateQuery.EpochStateQuery) epochUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, epochUpdater) epochUpdater, ok := epochUpdaterRaw.(*updaters.EpochUpdater) if !ok { @@ -60,7 +61,7 @@ func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, ep func (pst *ProviderStateTracker) RegisterForSpecUpdates(ctx context.Context, specUpdatable updaters.SpecUpdatable, endpoint lavasession.RPCEndpoint) error { // register for spec updates sets spec and updates when a spec has been modified - specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, pst.stateQuery, pst.EventTracker) + specUpdater := updaters.NewSpecUpdater(endpoint.ChainID, pst.StateQuery, pst.EventTracker) specUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -71,7 +72,7 @@ func (pst *ProviderStateTracker) RegisterForSpecUpdates(ctx context.Context, spe func (pst *ProviderStateTracker) RegisterForSpecVerifications(ctx context.Context, specVerifier updaters.SpecVerifier, chainId string) error { // register for spec verifications sets spec and verifies when a spec has been modified - specUpdater := updaters.NewSpecUpdater(chainId, pst.stateQuery, pst.EventTracker) + specUpdater := updaters.NewSpecUpdater(chainId, pst.StateQuery, pst.EventTracker) specUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, specUpdater) specUpdater, ok := specUpdaterRaw.(*updaters.SpecUpdater) if !ok { @@ -81,7 +82,7 @@ func (pst *ProviderStateTracker) RegisterForSpecVerifications(ctx context.Contex } func (pst *ProviderStateTracker) RegisterForVersionUpdates(ctx context.Context, version *protocoltypes.Version, versionValidator updaters.VersionValidationInf) { - versionUpdater := updaters.NewVersionUpdater(pst.stateQuery, pst.EventTracker, version, versionValidator) + versionUpdater := updaters.NewVersionUpdater(pst.StateQuery, 
pst.EventTracker, version, versionValidator) versionUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, versionUpdater) versionUpdater, ok := versionUpdaterRaw.(*updaters.VersionUpdater) if !ok { @@ -114,7 +115,7 @@ func (pst *ProviderStateTracker) RegisterPaymentUpdatableForPayments(ctx context func (pst *ProviderStateTracker) RegisterForDowntimeParamsUpdates(ctx context.Context, downtimeParamsUpdatable updaters.DowntimeParamsUpdatable) error { // register for downtimeParams updates sets downtimeParams and updates when downtimeParams has been changed - downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(pst.stateQuery, pst.EventTracker) + downtimeParamsUpdater := updaters.NewDowntimeParamsUpdater(pst.StateQuery, pst.EventTracker) downtimeParamsUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, downtimeParamsUpdater) downtimeParamsUpdater, ok := downtimeParamsUpdaterRaw.(*updaters.DowntimeParamsUpdater) if !ok { @@ -141,31 +142,31 @@ func (pst *ProviderStateTracker) LatestBlock() int64 { } func (pst *ProviderStateTracker) GetMaxCuForUser(ctx context.Context, consumerAddress, chainID string, epoch uint64) (maxCu uint64, err error) { - return pst.stateQuery.GetMaxCuForUser(ctx, consumerAddress, chainID, epoch) + return pst.StateQuery.GetMaxCuForUser(ctx, consumerAddress, chainID, epoch) } func (pst *ProviderStateTracker) VerifyPairing(ctx context.Context, consumerAddress, providerAddress string, epoch uint64, chainID string) (valid bool, total int64, projectId string, err error) { - return pst.stateQuery.VerifyPairing(ctx, consumerAddress, providerAddress, epoch, chainID) + return pst.StateQuery.VerifyPairing(ctx, consumerAddress, providerAddress, epoch, chainID) } func (pst *ProviderStateTracker) GetEpochSize(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetEpochSize(ctx) + return pst.StateQuery.GetEpochSize(ctx) } func (pst *ProviderStateTracker) EarliestBlockInMemory(ctx context.Context) (uint64, error) { - return pst.stateQuery.EarliestBlockInMemory(ctx) + return pst.StateQuery.EarliestBlockInMemory(ctx) } func (pst *ProviderStateTracker) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetRecommendedEpochNumToCollectPayment(ctx) + return pst.StateQuery.GetRecommendedEpochNumToCollectPayment(ctx) } func (pst *ProviderStateTracker) GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - return pst.stateQuery.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) + return pst.StateQuery.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) } func (pst *ProviderStateTracker) GetProtocolVersion(ctx context.Context) (*updaters.ProtocolVersionResponse, error) { - return pst.stateQuery.GetProtocolVersion(ctx) + return pst.StateQuery.GetProtocolVersion(ctx) } func (pst *ProviderStateTracker) GetAverageBlockTime() time.Duration { diff --git a/protocol/statetracker/state_tracker.go b/protocol/statetracker/state_tracker.go index c50639cb34..a87ecbf8b7 100644 --- a/protocol/statetracker/state_tracker.go +++ b/protocol/statetracker/state_tracker.go @@ -5,7 +5,6 @@ import ( "sync" "time" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/lavanet/lava/v4/protocol/chainlib" "github.com/lavanet/lava/v4/protocol/chaintracker" @@ -93,9 +92,9 @@ func GetLavaSpecWithRetry(ctx context.Context, specQueryClient spectypes.QueryCl return specResponse, err } -func NewStateTracker(ctx context.Context, txFactory tx.Factory, 
clientCtx client.Context, chainFetcher chaintracker.ChainFetcher, blockNotFoundCallback func(latestBlockTime time.Time)) (ret *StateTracker, err error) { +func NewStateTracker(ctx context.Context, txFactory tx.Factory, stateQuery *updaters.StateQuery, chainFetcher chaintracker.ChainFetcher, blockNotFoundCallback func(latestBlockTime time.Time)) (ret *StateTracker, err error) { // validate chainId - status, err := clientCtx.Client.Status(ctx) + status, err := stateQuery.Status(ctx) if err != nil { return nil, utils.LavaFormatError("failed getting status", err) } @@ -103,7 +102,7 @@ func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client return nil, utils.LavaFormatError("Chain ID mismatch", nil, utils.Attribute{Key: "--chain-id", Value: txFactory.ChainID()}, utils.Attribute{Key: "Node chainID", Value: status.NodeInfo.Network}) } - eventTracker := &updaters.EventTracker{ClientCtx: clientCtx} + eventTracker := &updaters.EventTracker{StateQuery: stateQuery} for i := 0; i < updaters.BlockResultRetry; i++ { err = eventTracker.UpdateBlockResults(0) if err == nil { @@ -114,7 +113,7 @@ func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client if err != nil { return nil, utils.LavaFormatError("failed getting blockResults after retries", err) } - specQueryClient := spectypes.NewQueryClient(clientCtx) + specQueryClient := stateQuery.GetSpecQueryClient() specResponse, err := GetLavaSpecWithRetry(ctx, specQueryClient) if err != nil { utils.LavaFormatFatal("failed querying lava spec for state tracker", err) diff --git a/protocol/statetracker/updaters/event_tracker.go b/protocol/statetracker/updaters/event_tracker.go index 6f442a83af..ec93865abe 100644 --- a/protocol/statetracker/updaters/event_tracker.go +++ b/protocol/statetracker/updaters/event_tracker.go @@ -2,14 +2,12 @@ package updaters import ( "context" - "fmt" "sync" "time" "golang.org/x/exp/slices" ctypes "github.com/cometbft/cometbft/rpc/core/types" - "github.com/cosmos/cosmos-sdk/client" "github.com/lavanet/lava/v4/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/v4/protocol/rpcprovider/rewardserver" "github.com/lavanet/lava/v4/utils" @@ -25,8 +23,8 @@ const ( var TimeOutForFetchingLavaBlocks = time.Second * 5 type EventTracker struct { - lock sync.RWMutex - ClientCtx client.Context + lock sync.RWMutex + *StateQuery blockResults *ctypes.ResultBlockResults latestUpdatedBlock int64 } @@ -38,7 +36,7 @@ func (et *EventTracker) UpdateBlockResults(latestBlock int64) (err error) { var res *ctypes.ResultStatus for i := 0; i < 3; i++ { timeoutCtx, cancel := context.WithTimeout(ctx, TimeOutForFetchingLavaBlocks) - res, err = et.ClientCtx.Client.Status(timeoutCtx) + res, err = et.StateQuery.Status(timeoutCtx) cancel() if err == nil { break @@ -50,14 +48,10 @@ func (et *EventTracker) UpdateBlockResults(latestBlock int64) (err error) { latestBlock = res.SyncInfo.LatestBlockHeight } - brp, err := TryIntoTendermintRPC(et.ClientCtx.Client) - if err != nil { - return utils.LavaFormatError("failed converting client.TendermintRPC to tendermintRPC", err) - } var blockResults *ctypes.ResultBlockResults for i := 0; i < BlockResultRetry; i++ { timeoutCtx, cancel := context.WithTimeout(ctx, TimeOutForFetchingLavaBlocks) - blockResults, err = brp.BlockResults(timeoutCtx, &latestBlock) + blockResults, err = et.StateQuery.BlockResults(timeoutCtx, &latestBlock) cancel() if err == nil { break @@ -216,11 +210,3 @@ type tendermintRPC interface { height *int64, ) (*ctypes.ResultConsensusParams, error) } - -func 
TryIntoTendermintRPC(cl client.TendermintRPC) (tendermintRPC, error) { - brp, ok := cl.(tendermintRPC) - if !ok { - return nil, fmt.Errorf("client does not implement tendermintRPC: %T", cl) - } - return brp, nil -} diff --git a/protocol/statetracker/updaters/provider_freeze_jail_updater.go b/protocol/statetracker/updaters/provider_freeze_jail_updater.go index 69f24ad1ab..0be8a93e8c 100644 --- a/protocol/statetracker/updaters/provider_freeze_jail_updater.go +++ b/protocol/statetracker/updaters/provider_freeze_jail_updater.go @@ -6,15 +6,15 @@ import ( "github.com/lavanet/lava/v4/utils" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" - "google.golang.org/grpc" + grpc "google.golang.org/grpc" ) const ( CallbackKeyForFreezeUpdate = "freeze-update" ) -type ProviderPairingStatusStateQueryInf interface { - Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) +type ProviderQueryGetter interface { + GetPairingQueryClient() pairingtypes.QueryClient } type ProviderMetricsManagerInf interface { @@ -30,27 +30,31 @@ const ( FROZEN ) +type ProviderPairingStatusStateQueryInf interface { + Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) +} + type ProviderFreezeJailUpdater struct { - pairingQueryClient ProviderPairingStatusStateQueryInf - metricsManager ProviderMetricsManagerInf - publicAddress string + querier ProviderPairingStatusStateQueryInf + metricsManager ProviderMetricsManagerInf + publicAddress string } func NewProviderFreezeJailUpdater( - pairingQueryClient ProviderPairingStatusStateQueryInf, + querier ProviderPairingStatusStateQueryInf, publicAddress string, metricsManager ProviderMetricsManagerInf, ) *ProviderFreezeJailUpdater { return &ProviderFreezeJailUpdater{ - pairingQueryClient: pairingQueryClient, - publicAddress: publicAddress, - metricsManager: metricsManager, + querier: querier, + publicAddress: publicAddress, + metricsManager: metricsManager, } } func (pfu *ProviderFreezeJailUpdater) UpdateEpoch(epoch uint64) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - response, err := pfu.pairingQueryClient.Provider(ctx, &pairingtypes.QueryProviderRequest{Address: pfu.publicAddress}) + response, err := pfu.querier.Provider(ctx, &pairingtypes.QueryProviderRequest{Address: pfu.publicAddress}) cancel() if err != nil { diff --git a/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go b/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go index 24b0738393..5b506dd58f 100644 --- a/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go +++ b/protocol/statetracker/updaters/provider_freeze_jail_updater_mocks.go @@ -41,6 +41,10 @@ func (m *MockProviderPairingStatusStateQueryInf) EXPECT() *MockProviderPairingSt return m.recorder } +func (m *MockProviderPairingStatusStateQueryInf) GetPairingQueryClient() ProviderPairingStatusStateQueryInf { + return m +} + // Provider mocks base method. 
func (m *MockProviderPairingStatusStateQueryInf) Provider(ctx context.Context, in *types.QueryProviderRequest, opts ...grpc.CallOption) (*types.QueryProviderResponse, error) { m.ctrl.T.Helper() diff --git a/protocol/statetracker/updaters/state_query.go b/protocol/statetracker/updaters/state_query.go index 51cb45ec61..0877f171c1 100644 --- a/protocol/statetracker/updaters/state_query.go +++ b/protocol/statetracker/updaters/state_query.go @@ -9,6 +9,7 @@ import ( downtimev1 "github.com/lavanet/lava/v4/x/downtime/v1" "github.com/cosmos/cosmos-sdk/client" + grpc1 "github.com/cosmos/gogoproto/grpc" "github.com/dgraph-io/ristretto" reliabilitymanager "github.com/lavanet/lava/v4/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/v4/utils" @@ -37,22 +38,40 @@ type ProtocolVersionResponse struct { BlockNumber string } +type StateQueryAccessInf interface { + grpc1.ClientConn + tendermintRPC + client.TendermintRPC +} + +type StateQueryAccessInst struct { + grpc1.ClientConn + tendermintRPC + client.TendermintRPC +} + +func NewStateQueryAccessInst(clientCtx client.Context) *StateQueryAccessInst { + tenderRpc, ok := clientCtx.Client.(tendermintRPC) + if !ok { + utils.LavaFormatFatal("failed casting tendermint rpc from client context", nil) + } + return &StateQueryAccessInst{ClientConn: clientCtx, tendermintRPC: tenderRpc, TendermintRPC: clientCtx.Client} +} + type StateQuery struct { - SpecQueryClient spectypes.QueryClient - PairingQueryClient pairingtypes.QueryClient - EpochStorageQueryClient epochstoragetypes.QueryClient - ProtocolClient protocoltypes.QueryClient - DowntimeClient downtimev1.QueryClient + specQueryClient spectypes.QueryClient + pairingQueryClient pairingtypes.QueryClient + epochStorageQueryClient epochstoragetypes.QueryClient + protocolClient protocoltypes.QueryClient + downtimeClient downtimev1.QueryClient ResponsesCache *ristretto.Cache + tendermintRPC + client.TendermintRPC } -func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { +func NewStateQuery(ctx context.Context, accessInf StateQueryAccessInf) *StateQuery { sq := &StateQuery{} - sq.SpecQueryClient = spectypes.NewQueryClient(clientCtx) - sq.PairingQueryClient = pairingtypes.NewQueryClient(clientCtx) - sq.EpochStorageQueryClient = epochstoragetypes.NewQueryClient(clientCtx) - sq.ProtocolClient = protocoltypes.NewQueryClient(clientCtx) - sq.DowntimeClient = downtimev1.NewQueryClient(clientCtx) + sq.UpdateAccess(accessInf) cache, err := ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: CacheMaxCost, BufferItems: 64}) if err != nil { utils.LavaFormatFatal("failed setting up cache for queries", err) @@ -61,9 +80,27 @@ func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { return sq } +func (sq *StateQuery) UpdateAccess(accessInf StateQueryAccessInf) { + sq.specQueryClient = spectypes.NewQueryClient(accessInf) + sq.pairingQueryClient = pairingtypes.NewQueryClient(accessInf) + sq.epochStorageQueryClient = epochstoragetypes.NewQueryClient(accessInf) + sq.protocolClient = protocoltypes.NewQueryClient(accessInf) + sq.downtimeClient = downtimev1.NewQueryClient(accessInf) + sq.tendermintRPC = accessInf + sq.TendermintRPC = accessInf +} + +func (sq *StateQuery) Provider(ctx context.Context, in *pairingtypes.QueryProviderRequest, opts ...grpc.CallOption) (*pairingtypes.QueryProviderResponse, error) { + return sq.pairingQueryClient.Provider(ctx, in, opts...) 
+} + +func (sq *StateQuery) GetSpecQueryClient() spectypes.QueryClient { + return sq.specQueryClient +} + func (csq *StateQuery) GetProtocolVersion(ctx context.Context) (*ProtocolVersionResponse, error) { header := metadata.MD{} - param, err := csq.ProtocolClient.Params(ctx, &protocoltypes.QueryParamsRequest{}, grpc.Header(&header)) + param, err := csq.protocolClient.Params(ctx, &protocoltypes.QueryParamsRequest{}, grpc.Header(&header)) if err != nil { return nil, err } @@ -76,7 +113,7 @@ func (csq *StateQuery) GetProtocolVersion(ctx context.Context) (*ProtocolVersion } func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes.Spec, error) { - spec, err := csq.SpecQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ + spec, err := csq.specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ ChainID: chainID, }) if err != nil { @@ -86,7 +123,7 @@ func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes. } func (csq *StateQuery) GetDowntimeParams(ctx context.Context) (*downtimev1.Params, error) { - res, err := csq.DowntimeClient.QueryParams(ctx, &downtimev1.QueryParamsRequest{}) + res, err := csq.downtimeClient.QueryParams(ctx, &downtimev1.QueryParamsRequest{}) if err != nil { return nil, err } @@ -94,13 +131,13 @@ func (csq *StateQuery) GetDowntimeParams(ctx context.Context) (*downtimev1.Param } type ConsumerStateQuery struct { - StateQuery - clientCtx client.Context + *StateQuery + fromAddress string lastChainID string } func NewConsumerStateQuery(ctx context.Context, clientCtx client.Context) *ConsumerStateQuery { - csq := &ConsumerStateQuery{StateQuery: *NewStateQuery(ctx, clientCtx), clientCtx: clientCtx, lastChainID: ""} + csq := &ConsumerStateQuery{StateQuery: NewStateQuery(ctx, NewStateQueryAccessInst(clientCtx)), fromAddress: clientCtx.FromAddress.String(), lastChainID: ""} return csq } @@ -114,7 +151,7 @@ func (csq *ConsumerStateQuery) GetEffectivePolicy(ctx context.Context, consumerA } } - resp, err := csq.PairingQueryClient.EffectivePolicy(ctx, &pairingtypes.QueryEffectivePolicyRequest{ + resp, err := csq.pairingQueryClient.EffectivePolicy(ctx, &pairingtypes.QueryEffectivePolicyRequest{ Consumer: consumerAddress, SpecID: specID, }) @@ -141,9 +178,9 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l } } - pairingResp, err := csq.PairingQueryClient.GetPairing(ctx, &pairingtypes.QueryGetPairingRequest{ + pairingResp, err := csq.pairingQueryClient.GetPairing(ctx, &pairingtypes.QueryGetPairingRequest{ ChainID: chainID, - Client: csq.clientCtx.FromAddress.String(), + Client: csq.fromAddress, }) if err != nil { return nil, 0, 0, utils.LavaFormatError("Failed in get pairing query", err, utils.Attribute{}) @@ -154,7 +191,7 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l utils.LavaFormatWarning("Chain returned empty provider list, check node connection and consumer subscription status, or no providers provide this chain", nil, utils.LogAttr("chainId", chainID), utils.LogAttr("epoch", pairingResp.CurrentEpoch), - utils.LogAttr("consumer_address", csq.clientCtx.FromAddress.String()), + utils.LogAttr("consumer_address", csq.fromAddress), ) } return pairingResp.Providers, pairingResp.CurrentEpoch, pairingResp.BlockOfNextPairing, nil @@ -175,8 +212,8 @@ func (csq *ConsumerStateQuery) GetMaxCUForUser(ctx context.Context, chainID stri } if userEntryRes == nil { - address := csq.clientCtx.FromAddress.String() - userEntryRes, err = csq.PairingQueryClient.UserEntry(ctx, 
&pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, Block: epoch}) + address := csq.fromAddress + userEntryRes, err = csq.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, Block: epoch}) if err != nil { return 0, utils.LavaFormatError("failed querying StakeEntry for consumer", err, utils.Attribute{Key: "chainID", Value: chainID}, utils.Attribute{Key: "address", Value: address}, utils.Attribute{Key: "block", Value: epoch}) } @@ -196,7 +233,7 @@ type EpochStateQuery struct { } func (esq *EpochStateQuery) CurrentEpochStart(ctx context.Context) (uint64, error) { - epochDetails, err := esq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) + epochDetails, err := esq.epochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { return 0, utils.LavaFormatError("Failed Querying EpochDetails", err) } @@ -209,15 +246,14 @@ func NewEpochStateQuery(stateQuery *StateQuery) *EpochStateQuery { } type ProviderStateQuery struct { - StateQuery + *StateQuery EpochStateQuery - clientCtx client.Context } -func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *ProviderStateQuery { - sq := NewStateQuery(ctx, clientCtx) +func NewProviderStateQuery(ctx context.Context, stateQueryAccess StateQueryAccessInf) *ProviderStateQuery { + sq := NewStateQuery(ctx, stateQueryAccess) esq := NewEpochStateQuery(sq) - csq := &ProviderStateQuery{StateQuery: *sq, EpochStateQuery: *esq, clientCtx: clientCtx} + csq := &ProviderStateQuery{StateQuery: sq, EpochStateQuery: *esq} return csq } @@ -233,7 +269,7 @@ func (psq *ProviderStateQuery) GetMaxCuForUser(ctx context.Context, consumerAddr } } if userEntryRes == nil { - userEntryRes, err = psq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) + userEntryRes, err = psq.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) if err != nil { return 0, utils.LavaFormatError("StakeEntry querying for consumer failed", err, utils.Attribute{Key: "chainID", Value: chainID}, utils.Attribute{Key: "address", Value: consumerAddress}, utils.Attribute{Key: "block", Value: epoch}) } @@ -248,10 +284,7 @@ func (psq *ProviderStateQuery) entryKey(consumerAddress, chainID string, epoch u } func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64) (votes []*reliabilitymanager.VoteParams, err error) { - brp, err := TryIntoTendermintRPC(psq.clientCtx.Client) - if err != nil { - return nil, utils.LavaFormatError("failed to get block result provider", err) - } + brp := psq.StateQuery.tendermintRPC blockResults, err := brp.BlockResults(ctx, &latestBlock) if err != nil { return nil, err @@ -311,7 +344,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres } } if verifyResponse == nil { - verifyResponse, err = psq.PairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ + verifyResponse, err = psq.pairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ ChainID: chainID, Client: consumerAddress, Provider: providerAddress, @@ -334,7 +367,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres } func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) { - res, err := 
psq.EpochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) + res, err := psq.epochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) if err != nil { return 0, err } @@ -342,7 +375,7 @@ func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) } func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint64, error) { - res, err := psq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) + res, err := psq.epochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { return 0, err } @@ -350,7 +383,7 @@ func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint6 } func (psq *ProviderStateQuery) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { - res, err := psq.PairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) + res, err := psq.pairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) if err != nil { return 0, err } From 938efd01e3a34dfa08bbc977dafc09090528554a Mon Sep 17 00:00:00 2001 From: oren-lava <111131399+oren-lava@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:59:31 +0200 Subject: [PATCH 11/18] chore: Fantom spec update (#1814) * remove redundant ETH api * add missing ftm api --- cookbook/specs/fantom.json | 405 +++++++++++++++++-------------------- 1 file changed, 184 insertions(+), 221 deletions(-) diff --git a/cookbook/specs/fantom.json b/cookbook/specs/fantom.json index 2ce468de09..01470e7bfa 100644 --- a/cookbook/specs/fantom.json +++ b/cookbook/specs/fantom.json @@ -32,17 +32,17 @@ }, "apis": [ { - "name": "ftm_chainId", + "name": "ftm_accounts", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 1, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -50,25 +50,25 @@ "extra_compute_units": 0 }, { - "name": "ftm_blockNumber", + "name": "ftm_chainId", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_subscribe", + "name": "ftm_blockNumber", "block_parsing": { "parser_arg": [ "" @@ -86,35 +86,35 @@ "extra_compute_units": 0 }, { - "name": "ftm_unsubscribe", + "name": "ftm_coinbase", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 19, + "compute_units": 10, "enabled": true, "category": { "deterministic": false, - "local": true, - "subscription": true, + "local": false, + "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_feeHistory", + "name": "ftm_syncing", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, - "compute_units": 19, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -122,50 +122,50 @@ "extra_compute_units": 0 }, { - "name": "ftm_maxPriorityFeePerGas", + "name": "ftm_subscribe", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 21, + "compute_units": 10, 
"enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_createAccessList", + "name": "ftm_unsubscribe", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 16, + "compute_units": 19, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getTransactionReceipt", + "name": "ftm_feeHistory", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 20, + "compute_units": 19, "enabled": true, "category": { "deterministic": true, @@ -176,14 +176,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByBlockHashAndIndex", + "name": "ftm_maxPriorityFeePerGas", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 20, + "compute_units": 21, "enabled": true, "category": { "deterministic": true, @@ -194,17 +194,17 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByBlockNumberAndIndex", + "name": "ftm_newBlockFilter", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 19, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -212,36 +212,35 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBlockByNumber", + "name": "ftm_newFilter", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 75, - "enabled": false, + "compute_units": 20, + "enabled": true, "category": { "deterministic": false, - "local": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getStorageAt", + "name": "ftm_newPendingTransactionFilter", "block_parsing": { "parser_arg": [ - "0", - "toBlock" + "latest" ], - "parser_func": "PARSE_CANONICAL" + "parser_func": "DEFAULT" }, - "compute_units": 75, + "compute_units": 20, "enabled": true, "category": { - "deterministic": true, + "deterministic": false, "local": false, "subscription": false, "stateful": 0 @@ -249,32 +248,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionByHash", + "name": "ftm_uninstallFilter", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 21, + "compute_units": 10, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_gasPrice", + "name": "ftm_createAccessList", "block_parsing": { "parser_arg": [ - "2" + "0" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 17, + "compute_units": 16, "enabled": true, "category": { "deterministic": true, @@ -285,14 +284,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBalance", + "name": "ftm_getTransactionReceipt", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -303,14 +302,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getCode", + "name": 
"ftm_getTransactionByBlockHashAndIndex", "block_parsing": { "parser_arg": [ "0" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -321,14 +320,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_sign", + "name": "ftm_getTransactionByBlockNumberAndIndex", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 17, + "compute_units": 19, "enabled": true, "category": { "deterministic": true, @@ -339,32 +338,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_signTransaction", + "name": "ftm_getBlockByNumber", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 26, - "enabled": true, + "compute_units": 75, + "enabled": false, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_sendTransaction", + "name": "ftm_getBlockReceipts", "block_parsing": { "parser_arg": [ - "" + "0" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 15, + "compute_units": 20, "enabled": true, "category": { "deterministic": true, @@ -375,68 +374,69 @@ "extra_compute_units": 0 }, { - "name": "ftm_getBlockTransactionCountByHash", + "name": "ftm_getStorageAt", "block_parsing": { "parser_arg": [ - "" + "0", + "toBlock" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_CANONICAL" }, - "compute_units": 20, + "compute_units": 75, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getBlockTransactionCountByNumber", + "name": "ftm_getTransactionByHash", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 250, + "compute_units": 21, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getProof", + "name": "ftm_gasPrice", "block_parsing": { "parser_arg": [ - "" + "2" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 17, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "ftm_getBlockByHash", + "name": "ftm_getBalance", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -447,14 +447,14 @@ "extra_compute_units": 0 }, { - "name": "ftm_getTransactionCount", + "name": "ftm_getCode", "block_parsing": { "parser_arg": [ - "" + "0" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -465,32 +465,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_call", + "name": "ftm_getFilterChanges", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, + "compute_units": 20, "enabled": true, "category": { "deterministic": false, - "local": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { 
- "name": "ftm_estimateGas", + "name": "ftm_getFilterLogs", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, - "compute_units": 87, + "compute_units": 60, "enabled": true, "category": { "deterministic": false, @@ -501,32 +501,32 @@ "extra_compute_units": 0 }, { - "name": "ftm_sendRawTransaction", + "name": "ftm_getLogs", "block_parsing": { "parser_arg": [ - "1" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, - "compute_units": 26, + "compute_units": 60, "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "net_version", + "name": "ftm_getUncleByBlockHashAndIndex", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -537,14 +537,14 @@ "extra_compute_units": 0 }, { - "name": "net_listening", + "name": "ftm_getUncleByBlockNumberAndIndex", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, + "compute_units": 15, "enabled": true, "category": { "deterministic": true, @@ -555,35 +555,35 @@ "extra_compute_units": 0 }, { - "name": "rpc_modules", + "name": "ftm_getUncleCountByBlockHash", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, + "compute_units": 15, "enabled": true, "category": { - "deterministic": false, - "local": true, + "deterministic": true, + "local": false, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_accounts", + "name": "ftm_getUncleCountByBlockNumber", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 15, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -591,17 +591,17 @@ "extra_compute_units": 0 }, { - "name": "eth_coinbase", + "name": "ftm_sign", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, - "enabled": false, + "compute_units": 17, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -609,15 +609,15 @@ "extra_compute_units": 0 }, { - "name": "eth_compileLLL", + "name": "ftm_signTransaction", "block_parsing": { "parser_arg": [ "1" ], "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, - "enabled": false, + "compute_units": 26, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -627,17 +627,17 @@ "extra_compute_units": 0 }, { - "name": "eth_getCompilers", + "name": "ftm_sendTransaction", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 15, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 @@ -645,87 +645,69 @@ "extra_compute_units": 0 }, { - "name": "eth_getFilterChanges", + "name": "ftm_getBlockTransactionCountByHash", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 20, - "enabled": false, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - 
"name": "eth_getUncleByBlockHashAndIndex", + "name": "ftm_getBlockTransactionCountByNumber", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 20, - "enabled": false, - "category": { - "deterministic": true, - "local": false, - "subscription": false, - "stateful": 0 - }, - "extra_compute_units": 0 - }, - { - "name": "eth_getUncleByBlockNumberAndIndex", - "block_parsing": { - "parser_arg": [ - "0" - ], - "parser_func": "PARSE_BY_ARG" - }, - "compute_units": 20, - "enabled": false, + "compute_units": 250, + "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_getUncleCountByBlockHash", + "name": "ftm_getProof", "block_parsing": { "parser_arg": [ - "2" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, - "compute_units": 20, - "enabled": false, + "compute_units": 10, + "enabled": true, "category": { - "deterministic": true, - "local": false, + "deterministic": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_getUncleCountByBlockNumber", + "name": "ftm_getBlockByHash", "block_parsing": { "parser_arg": [ - "0" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -735,15 +717,15 @@ "extra_compute_units": 0 }, { - "name": "eth_getWork", + "name": "ftm_getTransactionCount", "block_parsing": { "parser_arg": [ - "1" + "" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, + "enabled": true, "category": { "deterministic": true, "local": false, @@ -753,25 +735,25 @@ "extra_compute_units": 0 }, { - "name": "eth_hashrate", + "name": "ftm_call", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 1, - "enabled": false, + "compute_units": 10, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_mining", + "name": "ftm_hashrate", "block_parsing": { "parser_arg": [ "" @@ -779,44 +761,25 @@ "parser_func": "EMPTY" }, "compute_units": 10, - "enabled": false, - "category": { - "deterministic": false, - "local": false, - "subscription": false, - "stateful": 0 - }, - "extra_compute_units": 0 - }, - { - "name": "eth_newFilter", - "block_parsing": { - "parser_arg": [ - "0", - "toBlock" - ], - "parser_func": "PARSE_CANONICAL" - }, - "compute_units": 20, - "enabled": false, + "enabled": true, "category": { "deterministic": false, - "local": false, + "local": true, "subscription": false, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "eth_protocolVersion", + "name": "ftm_estimateGas", "block_parsing": { "parser_arg": [ "" ], "parser_func": "EMPTY" }, - "compute_units": 10, - "enabled": false, + "compute_units": 87, + "enabled": true, "category": { "deterministic": false, "local": true, @@ -826,17 +789,17 @@ "extra_compute_units": 0 }, { - "name": "eth_syncing", + "name": "ftm_sendRawTransaction", "block_parsing": { "parser_arg": [ - "" + "1" ], - "parser_func": "EMPTY" + "parser_func": "PARSE_BY_ARG" }, - "compute_units": 10, - "enabled": false, + "compute_units": 26, + "enabled": true, "category": { - "deterministic": false, + "deterministic": true, "local": false, "subscription": false, "stateful": 0 From 
8b16323eacb21aef70619d1c9ed269aeee0eed33 Mon Sep 17 00:00:00 2001 From: oren-lava <111131399+oren-lava@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:01:02 +0200 Subject: [PATCH 12/18] chore: update Solana spec (#1806) * update solana spec * update READMEs * fix solana spec * remove subscription APIs * chore: Solana Subscription APIs (#1808) * add subscription APIs to solana spec * minor fix for multiple SUBSCRIBE tags * fix unsubscribe methods * change solana unsubscribe params from %s to %d --------- Co-authored-by: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Co-authored-by: Yaroms <103432884+Yaroms@users.noreply.github.com> --- cookbook/README.md | 4 +- cookbook/specs/solana.json | 351 ++++++++++++++++++++----- protocol/chainlib/base_chain_parser.go | 18 +- x/spec/README.md | 8 +- 4 files changed, 309 insertions(+), 72 deletions(-) mode change 100644 => 100755 cookbook/specs/solana.json diff --git a/cookbook/README.md b/cookbook/README.md index 2fa9633235..2fc5f829bf 100644 --- a/cookbook/README.md +++ b/cookbook/README.md @@ -26,7 +26,7 @@ Lava has many specs and participants can add and modify specs using governance p | reliability_threshold | Threshold for VRF to decide when to do a data reliability check (i.e. re-execute query with another provider). Currently set to `268435455` on all specs resulting in a `1/16` ratio.| | data_reliability_enabled | True/False for data reliability on/off for this spec. | | block_distance_for_finalized_data | Blockchains like Ethereum have probabilistic finality, this threshold sets what we expect to be a safe distance from the latest block (In eth it’s 7: i.e. any block bigger in distance than 7 from the latest block we consider final).| -| blocks_in_finalization_proof | Number of finalized blocks the provider keeps (from the chain he provides service for, not always Lava) for data reliability. | +| blocks_in_finalization_proof | Number of finalized blocks the provider keeps (from the chain he provides service for, not always Lava) for data reliability. Normally, this value should be: 1sec / average_block_time | | average_block_time | Average block time on this blockchain, used for estimating time of future blocks. | | allowed_block_lag_for_qos_sync | Lag used to calculate QoS for providers. this should be `(10000 (10 seconds) / average_block_time) AND bigger than 1`, beyond this distance the data is considered stale and irrelevant. | | block_last_updated | The latest block in which the spec was updated. | @@ -65,7 +65,7 @@ Lava has many specs and participants can add and modify specs using governance p | deterministic| True/False. If an API is deterministic (executing the API twice in the same block will have the same result, which means different providers are supposed to get the same result), we can run data reliability checks on it. | | local | True/False. Marks an API that is local to the node (like subscription APIs, which are not relevant to other nodes) | | subscription | True/False. Marks a subscription API. Requires an active connection to a node to get data pushed from a provider. | -| stateful | Requires local storage on the provider’s node. | +| stateful | True for transaction APIs. | | hanging_api | True/False. Marks an API that is dependent on a creation of a new block (so the API hangs until this happens). | ### How to propose a new spec? 
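
When filling in these values for a new spec, the arithmetic quoted in the table above can be sanity-checked with a minimal Go sketch. This is not part of the patch; the helper name and the sample block times are illustrative assumptions, and the sketch only restates the `allowed_block_lag_for_qos_sync` guideline (a 10-second window divided by `average_block_time` in milliseconds, always bigger than 1):

```go
package main

import "fmt"

// allowedBlockLagForQosSync mirrors the guideline above:
// roughly 10000ms / average_block_time, clamped to stay bigger than 1.
func allowedBlockLagForQosSync(averageBlockTimeMs int64) int64 {
	lag := 10000 / averageBlockTimeMs
	if lag <= 1 {
		lag = 2
	}
	return lag
}

func main() {
	// Hypothetical chains with 2s and 12s average block times.
	fmt.Println(allowedBlockLagForQosSync(2000))  // 5
	fmt.Println(allowedBlockLagForQosSync(12000)) // integer division gives 0, clamped to 2
}
```
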
diff --git a/cookbook/specs/solana.json b/cookbook/specs/solana.json old mode 100644 new mode 100755 index e14203dd39..5089de1331 --- a/cookbook/specs/solana.json +++ b/cookbook/specs/solana.json @@ -479,7 +479,7 @@ "extra_compute_units": 0 }, { - "name": "getMinimumBalanceForRentExemption", + "name": "getMaxShredInsertSlot", "block_parsing": { "parser_arg": [ "latest" @@ -497,7 +497,7 @@ "extra_compute_units": 0 }, { - "name": "getMultipleAccounts", + "name": "getMinimumBalanceForRentExemption", "block_parsing": { "parser_arg": [ "latest" @@ -515,7 +515,7 @@ "extra_compute_units": 0 }, { - "name": "getProgramAccounts", + "name": "getMultipleAccounts", "block_parsing": { "parser_arg": [ "latest" @@ -533,7 +533,7 @@ "extra_compute_units": 0 }, { - "name": "getRecentPerformanceSamples", + "name": "getProgramAccounts", "block_parsing": { "parser_arg": [ "latest" @@ -551,7 +551,7 @@ "extra_compute_units": 0 }, { - "name": "getRecentPrioritizationFees", + "name": "getRecentPerformanceSamples", "block_parsing": { "parser_arg": [ "latest" @@ -569,7 +569,7 @@ "extra_compute_units": 0 }, { - "name": "getSignaturesForAddress", + "name": "getRecentPrioritizationFees", "block_parsing": { "parser_arg": [ "latest" @@ -587,7 +587,7 @@ "extra_compute_units": 0 }, { - "name": "getSignatureStatuses", + "name": "getSignaturesForAddress", "block_parsing": { "parser_arg": [ "latest" @@ -605,7 +605,7 @@ "extra_compute_units": 0 }, { - "name": "getSlot", + "name": "getSignatureStatuses", "block_parsing": { "parser_arg": [ "latest" @@ -623,7 +623,7 @@ "extra_compute_units": 0 }, { - "name": "getSlotLeader", + "name": "getSlot", "block_parsing": { "parser_arg": [ "latest" @@ -641,7 +641,7 @@ "extra_compute_units": 0 }, { - "name": "getSlotLeaders", + "name": "getSlotLeader", "block_parsing": { "parser_arg": [ "latest" @@ -659,7 +659,7 @@ "extra_compute_units": 0 }, { - "name": "getStakeActivation", + "name": "getSlotLeaders", "block_parsing": { "parser_arg": [ "latest" @@ -966,181 +966,325 @@ "extra_compute_units": 0 }, { - "name": "getConfirmedBlock", + "name": "accountSubscribe", "block_parsing": { "parser_arg": [ - "0" + "latest" ], - "parser_func": "PARSE_BY_ARG" + "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedBlocks", + "name": "accountUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedBlocksWithLimit", + "name": "blockSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedSignaturesForAddress2", + "name": "blockUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - 
"subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getConfirmedTransaction", + "name": "logsSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFeeCalculatorForBlockhash", + "name": "logsUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFeeRateGovernor", + "name": "programSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getFees", + "name": "programUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getRecentBlockhash", + "name": "rootSubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 }, { - "name": "getSnapshotSlot", + "name": "rootUnsubscribe", "block_parsing": { "parser_arg": [ "latest" ], "parser_func": "DEFAULT" }, - "compute_units": 10, + "compute_units": 1000, "enabled": true, "category": { - "deterministic": true, - "local": false, - "subscription": false, + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "signatureSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "signatureUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + 
"compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotsUpdatesSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "slotsUpdatesUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "voteSubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, + "stateful": 0 + }, + "extra_compute_units": 0 + }, + { + "name": "voteUnsubscribe", + "block_parsing": { + "parser_arg": [ + "latest" + ], + "parser_func": "DEFAULT" + }, + "compute_units": 1000, + "enabled": true, + "category": { + "deterministic": false, + "local": true, + "subscription": true, "stateful": 0 }, "extra_compute_units": 0 @@ -1174,6 +1318,87 @@ "encoding": "base64" }, "api_name": "getBlock" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "accountSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"accountUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "accountUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "blockSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"blockUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "blockUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "logsSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"logsUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "logsUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "programSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"programUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "programUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "rootSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"rootUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "rootUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "signatureSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"signatureUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "signatureUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "slotSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"slotUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "slotUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": "slotsUpdatesSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"slotsUpdatesUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "slotsUpdatesUnsubscribe" + }, + { + "function_tag": "SUBSCRIBE", + "api_name": 
"voteSubscribe" + }, + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"voteUnsubscribe\",\"params\":[%d],\"id\":1}", + "function_tag": "UNSUBSCRIBE", + "api_name": "voteUnsubscribe" } ], "verifications": [ diff --git a/protocol/chainlib/base_chain_parser.go b/protocol/chainlib/base_chain_parser.go index fa730cde48..6cc5e8ea24 100644 --- a/protocol/chainlib/base_chain_parser.go +++ b/protocol/chainlib/base_chain_parser.go @@ -429,9 +429,21 @@ func getServiceApis( } for _, parsing := range apiCollection.ParseDirectives { - taggedApis[parsing.FunctionTag] = TaggedContainer{ - Parsing: parsing, - ApiCollection: apiCollection, + // We do this because some specs may have multiple parse directives + // with the same tag - SUBSCRIBE (like in Solana). + // + // Since the function tag is not used for handling the subscription flow, + // we can ignore the extra parse directives and take only the first one. The + // subscription flow is handled by the consumer websocket manager and the chain router + // that uses the api collection to fetch the correct parse directive. + // + // The only place the SUBSCRIBE tag is checked against the taggedApis map is in the chain parser with GetParsingByTag. + // But there, we're not interested in the parse directive, only if the tag is present. + if _, ok := taggedApis[parsing.FunctionTag]; !ok { + taggedApis[parsing.FunctionTag] = TaggedContainer{ + Parsing: parsing, + ApiCollection: apiCollection, + } } } diff --git a/x/spec/README.md b/x/spec/README.md index 6f43a1f00d..9a6f09bd2f 100644 --- a/x/spec/README.md +++ b/x/spec/README.md @@ -53,8 +53,8 @@ type Spec struct { BlockLastUpdated uint64 // the last block this spec was updated on chain ReliabilityThreshold uint32 // this determines the probability of data reliability checks by the consumer DataReliabilityEnabled bool // enables/disables data reliability for the chain - BlockDistanceForFinalizedData uint32 - BlocksInFinalizationProof uint32 + BlockDistanceForFinalizedData uint32 // number of finalized blocks a provider keeps for data reliability + BlocksInFinalizationProof uint32 // number of blocks for finalization } ``` `Coin` type is from Cosmos-SDK (`cosmos.base.v1beta1.Coin`). @@ -157,9 +157,9 @@ This struct defines properties of an api. 
```go type SpecCategory struct { Deterministic bool // if this api have the same response across nodes - Local bool // TBD + Local bool // specific to the local node (like node info query) Subscription bool // subscription base api - Stateful uint32 // TBD + Stateful uint32 // true for transaction APIs HangingApi bool // marks this api with longer timeout } ``` From b4bd381efd819346f68064772c1b7dc09805296a Mon Sep 17 00:00:00 2001 From: Omer <100387053+omerlavanet@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:27:41 +0200 Subject: [PATCH 13/18] feat: add lava over lava secondary transport (#1769) * refactor state query access * remove direct usage of client.Context to allow the rewiring of lava over lava * refactor rpcconsumer, allow creating a server with a function * lint * added custom lava transport * add lava over lava secondary transport * lint * added initialization condition * relaysMonitor is dependent on metrics, so put the functionality in rpcconsumer server * added metrics * add vote test script * fix lint * added support for e2e * oops brackets * added secondary transport startup --------- Co-authored-by: Ran Mishael --- protocol/chainlib/base_chain_parser.go | 51 +++++++++++++++++ protocol/chainlib/chainlib.go | 26 +++++++++ protocol/chainlib/tendermintRPC.go | 6 ++ protocol/lavasession/provider_types.go | 2 +- protocol/metrics/consumer_metrics_manager.go | 27 +++++++++ protocol/metrics/rpcconsumer_logs.go | 7 +++ protocol/rpcconsumer/custom_transport.go | 47 +++++++++++++-- protocol/rpcconsumer/rpcconsumer.go | 60 +++++++++++++++++++- protocol/rpcconsumer/rpcconsumer_server.go | 36 +++++++++++- protocol/statetracker/state_tracker.go | 15 ++++- scripts/test/vote_test.sh | 31 ++++++++++ 11 files changed, 295 insertions(+), 13 deletions(-) create mode 100755 scripts/test/vote_test.sh diff --git a/protocol/chainlib/base_chain_parser.go b/protocol/chainlib/base_chain_parser.go index 6cc5e8ea24..1017fb22c0 100644 --- a/protocol/chainlib/base_chain_parser.go +++ b/protocol/chainlib/base_chain_parser.go @@ -3,6 +3,8 @@ package chainlib import ( "errors" "fmt" + "io" + "net/http" "regexp" "strings" "sync" @@ -356,6 +358,55 @@ func (apip *BaseChainParser) isValidInternalPath(path string) bool { return ok } +// take an http request and direct it through the consumer +func (apip *BaseChainParser) ExtractDataFromRequest(request *http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) { + // Extract relative URL path + url = request.URL.Path + // Extract connection type + connectionType = request.Method + + // Extract metadata + for key, values := range request.Header { + for _, value := range values { + metadata = append(metadata, pairingtypes.Metadata{ + Name: key, + Value: value, + }) + } + } + + // Extract data + if request.Body != nil { + bodyBytes, err := io.ReadAll(request.Body) + if err != nil { + return "", "", "", nil, err + } + data = string(bodyBytes) + } + + return url, data, connectionType, metadata, nil +} + +func (apip *BaseChainParser) SetResponseFromRelayResult(relayResult *common.RelayResult) (*http.Response, error) { + if relayResult == nil { + return nil, errors.New("relayResult is nil") + } + response := &http.Response{ + StatusCode: relayResult.StatusCode, + Header: make(http.Header), + } + + for _, values := range relayResult.Reply.Metadata { + response.Header.Add(values.Name, values.Value) + } + + if relayResult.Reply != nil && relayResult.Reply.Data != nil { + response.Body = 
io.NopCloser(strings.NewReader(string(relayResult.Reply.Data))) + } + + return response, nil +} + // getSupportedApi fetches service api from spec by name func (apip *BaseChainParser) getApiCollection(connectionType, internalPath, addon string) (*spectypes.ApiCollection, error) { // Guard that the GrpcChainParser instance exists diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 49a90af1db..5c3fbd9b7f 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -3,6 +3,7 @@ package chainlib import ( "context" "fmt" + "net/http" "time" "github.com/lavanet/lava/v4/protocol/chainlib/chainproxy/rpcInterfaceMessages" @@ -11,10 +12,15 @@ import ( "github.com/lavanet/lava/v4/protocol/common" "github.com/lavanet/lava/v4/protocol/lavasession" "github.com/lavanet/lava/v4/protocol/metrics" + "github.com/lavanet/lava/v4/utils" pairingtypes "github.com/lavanet/lava/v4/x/pairing/types" spectypes "github.com/lavanet/lava/v4/x/spec/types" ) +const ( + INTERNAL_ADDRESS = "internal-addr" +) + var ( IgnoreSubscriptionNotConfiguredError = true IgnoreSubscriptionNotConfiguredErrorFlag = "ignore-subscription-not-configured-error" @@ -44,6 +50,10 @@ func NewChainListener( refererData *RefererData, consumerWsSubscriptionManager *ConsumerWSSubscriptionManager, ) (ChainListener, error) { + if listenEndpoint.NetworkAddress == INTERNAL_ADDRESS { + utils.LavaFormatDebug("skipping chain listener for internal address") + return NewEmptyChainListener(), nil + } switch listenEndpoint.ApiInterface { case spectypes.APIInterfaceJsonRPC: return NewJrpcChainListener(ctx, listenEndpoint, relaySender, healthReporter, rpcConsumerLogs, refererData, consumerWsSubscriptionManager), nil @@ -76,6 +86,8 @@ type ChainParser interface { UpdateBlockTime(newBlockTime time.Duration) GetUniqueName() string ExtensionsParser() *extensionslib.ExtensionParser + ExtractDataFromRequest(*http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) + SetResponseFromRelayResult(*common.RelayResult) (*http.Response, error) } type ChainMessage interface { @@ -173,3 +185,17 @@ func GetChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint *lavas } return newChainRouter(ctx, nConns, *rpcProviderEndpoint, chainParser, proxyConstructor) } + +type EmptyChainListener struct{} + +func NewEmptyChainListener() ChainListener { + return &EmptyChainListener{} +} + +func (*EmptyChainListener) Serve(ctx context.Context, cmdFlags common.ConsumerCmdFlags) { + // do nothing +} + +func (*EmptyChainListener) GetListeningAddress() string { + return "" +} diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 6844bbaa24..ad35dbc517 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -275,6 +275,12 @@ func (*TendermintChainParser) newBatchChainMessage(serviceApi *spectypes.Api, re return nodeMsg, err } +// overwritten because tendermintrpc doesnt use POST but an empty connecionType +func (apip *TendermintChainParser) ExtractDataFromRequest(request *http.Request) (url string, data string, connectionType string, metadata []pairingtypes.Metadata, err error) { + url, data, _, metadata, err = apip.BaseChainParser.ExtractDataFromRequest(request) + return url, data, "", metadata, err +} + func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.Api, requestedBlock int64, requestedHashes []string, msg *rpcInterfaceMessages.TendermintrpcMessage, apiCollection *spectypes.ApiCollection, 
usedDefaultValue bool) *baseChainMessageContainer { nodeMsg := &baseChainMessageContainer{ api: serviceApi, diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 955abe4b3d..7cc985e1da 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -50,7 +50,7 @@ func (endpoint *RPCProviderEndpoint) AddonsString() string { } func (endpoint *RPCProviderEndpoint) String() string { - return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress.Address + " Node: " + endpoint.UrlsString() + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + " Addons:" + endpoint.AddonsString() + return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress.Address + " Node:" + endpoint.UrlsString() + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + " Addons:" + endpoint.AddonsString() } func (endpoint *RPCProviderEndpoint) Validate() error { diff --git a/protocol/metrics/consumer_metrics_manager.go b/protocol/metrics/consumer_metrics_manager.go index 9a77678a4d..ae4ee74319 100644 --- a/protocol/metrics/consumer_metrics_manager.go +++ b/protocol/metrics/consumer_metrics_manager.go @@ -45,6 +45,8 @@ type ConsumerMetricsManager struct { totalFailedWsSubscriptionRequestsMetric *prometheus.CounterVec totalWsSubscriptionDissconnectMetric *prometheus.CounterVec totalDuplicatedWsSubscriptionRequestsMetric *prometheus.CounterVec + totalLoLSuccessMetric prometheus.Counter + totalLoLErrorsMetric prometheus.Counter totalWebSocketConnectionsActive *prometheus.GaugeVec blockMetric *prometheus.GaugeVec latencyMetric *prometheus.GaugeVec @@ -118,6 +120,16 @@ func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerM Help: "The total number of duplicated webscket subscription requests over time per chain id per api interface.", }, []string{"spec", "apiInterface"}) + totalLoLSuccessMetric := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "lava_consumer_total_lol_successes", + Help: "The total number of requests sent to lava over lava successfully", + }) + + totalLoLErrorsMetric := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "lava_consumer_total_lol_errors", + Help: "The total number of requests sent to lava over lava and failed", + }) + totalWebSocketConnectionsActive := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "lava_consumer_total_websocket_connections_active", Help: "The total number of currently active websocket connections with users", @@ -241,6 +253,8 @@ func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerM prometheus.MustRegister(totalFailedWsSubscriptionRequestsMetric) prometheus.MustRegister(totalDuplicatedWsSubscriptionRequestsMetric) prometheus.MustRegister(totalWsSubscriptionDissconnectMetric) + prometheus.MustRegister(totalLoLSuccessMetric) + prometheus.MustRegister(totalLoLErrorsMetric) consumerMetricsManager := &ConsumerMetricsManager{ totalCURequestedMetric: totalCURequestedMetric, @@ -274,6 +288,8 @@ func NewConsumerMetricsManager(options ConsumerMetricsManagerOptions) *ConsumerM relayProcessingLatencyBeforeProvider: relayProcessingLatencyBeforeProvider, relayProcessingLatencyAfterProvider: relayProcessingLatencyAfterProvider, averageProcessingLatency: map[string]*LatencyTracker{}, + totalLoLSuccessMetric: totalLoLSuccessMetric, + totalLoLErrorsMetric: totalLoLErrorsMetric, consumerOptimizerQoSClient: options.ConsumerOptimizerQoSClient, } @@ -565,6 
+581,17 @@ func (pme *ConsumerMetricsManager) SetWsSubscriptioDisconnectRequestMetric(chain pme.totalWsSubscriptionDissconnectMetric.WithLabelValues(chainId, apiInterface, disconnectReason).Inc() } +func (pme *ConsumerMetricsManager) SetLoLResponse(success bool) { + if pme == nil { + return + } + if success { + pme.totalLoLSuccessMetric.Inc() + } else { + pme.totalLoLErrorsMetric.Inc() + } +} + func (pme *ConsumerMetricsManager) handleOptimizerQoS(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) diff --git a/protocol/metrics/rpcconsumer_logs.go b/protocol/metrics/rpcconsumer_logs.go index d09f988716..0bc0359384 100644 --- a/protocol/metrics/rpcconsumer_logs.go +++ b/protocol/metrics/rpcconsumer_logs.go @@ -92,6 +92,13 @@ func NewRPCConsumerLogs(consumerMetricsManager *ConsumerMetricsManager, consumer return rpcConsumerLogs, err } +func (rpccl *RPCConsumerLogs) SetLoLResponse(success bool) { + if rpccl == nil { + return + } + rpccl.consumerMetricsManager.SetLoLResponse(success) +} + func (rpccl *RPCConsumerLogs) SetWebSocketConnectionActive(chainId string, apiInterface string, add bool) { rpccl.consumerMetricsManager.SetWebSocketConnectionActive(chainId, apiInterface, add) } diff --git a/protocol/rpcconsumer/custom_transport.go b/protocol/rpcconsumer/custom_transport.go index aef36b3396..415fea1f85 100644 --- a/protocol/rpcconsumer/custom_transport.go +++ b/protocol/rpcconsumer/custom_transport.go @@ -2,22 +2,57 @@ package rpcconsumer import ( "net/http" + "sync" + "sync/atomic" + + "github.com/lavanet/lava/v4/utils" ) type CustomLavaTransport struct { - transport http.RoundTripper + transport http.RoundTripper + lock sync.RWMutex + secondaryTransport http.RoundTripper + consecutiveFails atomic.Uint64 // TODO: export to metrics +} + +func NewCustomLavaTransport(httpTransport http.RoundTripper, secondaryTransport http.RoundTripper) *CustomLavaTransport { + return &CustomLavaTransport{transport: httpTransport, secondaryTransport: secondaryTransport} } -func NewCustomLavaTransport(httpTransport http.RoundTripper) *CustomLavaTransport { - return &CustomLavaTransport{transport: httpTransport} +func (c *CustomLavaTransport) SetSecondaryTransport(secondaryTransport http.RoundTripper) { + c.lock.Lock() + defer c.lock.Unlock() + utils.LavaFormatDebug("Setting secondary transport for CustomLavaTransport") + c.secondaryTransport = secondaryTransport +} + +// used to switch the primary and secondary transports, in case the primary one fails too much +func (c *CustomLavaTransport) TogglePrimarySecondaryTransport() { + c.lock.Lock() + defer c.lock.Unlock() + primaryTransport := c.transport + secondaryTransport := c.secondaryTransport + c.secondaryTransport = primaryTransport + c.transport = secondaryTransport } func (c *CustomLavaTransport) RoundTrip(req *http.Request) (*http.Response, error) { // Custom logic before the request - + c.lock.RLock() + primaryTransport := c.transport + secondaryTransport := c.secondaryTransport + c.lock.RUnlock() // Delegate to the underlying RoundTripper (usually http.Transport) - resp, err := c.transport.RoundTrip(req) - + resp, err := primaryTransport.RoundTrip(req) // Custom logic after the request + if err != nil { + c.consecutiveFails.Add(1) + // If the primary transport fails, use the secondary transport + if secondaryTransport != nil { + resp, err = secondaryTransport.RoundTrip(req) + } + } else { + c.consecutiveFails.Store(0) + } return resp, err } diff --git 
a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 62448fe557..94784cdd2e 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -49,6 +49,7 @@ const ( refererBackendAddressFlagName = "referer-be-address" refererMarkerFlagName = "referer-marker" reportsSendBEAddress = "reports-be-address" + LavaOverLavaBackupFlagName = "use-lava-over-lava-backup" ) var ( @@ -156,9 +157,11 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt } consumerMetricsManager.SetVersion(upgrade.GetCurrentVersion().ConsumerVersion) + var customLavaTransport *CustomLavaTransport httpClient, err := jsonrpcclient.DefaultHTTPClient(options.clientCtx.NodeURI) if err == nil { - httpClient.Transport = NewCustomLavaTransport(httpClient.Transport) + customLavaTransport = NewCustomLavaTransport(httpClient.Transport, nil) + httpClient.Transport = customLavaTransport client, err := rpchttp.NewWithClient(options.clientCtx.NodeURI, "/websocket", httpClient) if err == nil { options.clientCtx = options.clientCtx.WithClient(client) @@ -227,10 +230,25 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt for _, rpcEndpoint := range options.rpcEndpoints { go func(rpcEndpoint *lavasession.RPCEndpoint) error { defer wg.Done() - _, err := rpcc.CreateConsumerEndpoint(ctx, rpcEndpoint, errCh, consumerAddr, consumerStateTracker, + rpcConsumerServer, err := rpcc.CreateConsumerEndpoint(ctx, rpcEndpoint, errCh, consumerAddr, consumerStateTracker, policyUpdaters, optimizers, consumerConsistencies, finalizationConsensuses, chainMutexes, options, privKey, lavaChainID, rpcConsumerMetrics, consumerReportsManager, consumerOptimizerQoSClient, consumerMetricsManager, relaysMonitorAggregator) + if err == nil { + if customLavaTransport != nil && statetracker.IsLavaNativeSpec(rpcEndpoint.ChainID) && rpcEndpoint.ApiInterface == spectypes.APIInterfaceTendermintRPC { + // we can add lava over lava to the custom transport as a secondary source + go func() { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for range ticker.C { + if rpcConsumerServer.IsInitialized() { + customLavaTransport.SetSecondaryTransport(rpcConsumerServer) + return + } + } + }() + } + } return err }(rpcEndpoint) } @@ -634,6 +652,43 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 utils.LavaFormatFatal("offline spec modifications are supported only in single chain bootstrapping", nil, utils.LogAttr("len(rpcEndpoints)", len(rpcEndpoints)), utils.LogAttr("rpcEndpoints", rpcEndpoints)) } + if viper.GetBool(LavaOverLavaBackupFlagName) { + additionalEndpoint := func() *lavasession.RPCEndpoint { + for _, endpoint := range rpcEndpoints { + if statetracker.IsLavaNativeSpec(endpoint.ChainID) { + // native spec already exists, no need to add + return nil + } + } + // need to add an endpoint for the native lava chain + if strings.Contains(networkChainId, "mainnet") { + return &lavasession.RPCEndpoint{ + NetworkAddress: chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.MAINNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } else if strings.Contains(networkChainId, "testnet") { + return &lavasession.RPCEndpoint{ + NetworkAddress: chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.TESTNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } else if strings.Contains(networkChainId, "testnet") || networkChainId == "lava" { + return &lavasession.RPCEndpoint{ + NetworkAddress: 
chainlib.INTERNAL_ADDRESS, + ChainID: statetracker.TESTNET_SPEC, + ApiInterface: spectypes.APIInterfaceTendermintRPC, + } + } + utils.LavaFormatError("could not find a native lava chain for the current network", nil, utils.LogAttr("networkChainId", networkChainId)) + return nil + }() + if additionalEndpoint != nil { + utils.LavaFormatInfo("Lava over Lava backup is enabled", utils.Attribute{Key: "additionalEndpoint", Value: additionalEndpoint.ChainID}) + rpcEndpoints = append(rpcEndpoints, additionalEndpoint) + } + } + rpcConsumerSharedState := viper.GetBool(common.SharedStateFlag) err = rpcConsumer.Start(ctx, &rpcConsumerStartOptions{ txFactory, @@ -699,6 +754,7 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 cmdRPCConsumer.Flags().DurationVar(&metrics.OptimizerQosServerSamplingInterval, common.OptimizerQosServerSamplingIntervalFlag, time.Second*1, "interval to sample optimizer qos reports") cmdRPCConsumer.Flags().IntVar(&chainlib.WebSocketRateLimit, common.RateLimitWebSocketFlag, chainlib.WebSocketRateLimit, "rate limit (per second) websocket requests per user connection, default is unlimited") cmdRPCConsumer.Flags().DurationVar(&chainlib.WebSocketBanDuration, common.BanDurationForWebsocketRateLimitExceededFlag, chainlib.WebSocketBanDuration, "once websocket rate limit is reached, user will be banned Xfor a duration, default no ban") + cmdRPCConsumer.Flags().Bool(LavaOverLavaBackupFlagName, true, "enable lava over lava backup to regular rpc calls") common.AddRollingLogConfig(cmdRPCConsumer) return cmdRPCConsumer } diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index a2ae109f0f..a1c6e55823 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "net/http" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/goccy/go-json" @@ -77,6 +79,7 @@ type RPCConsumerServer struct { chainListener chainlib.ChainListener connectedSubscriptionsLock sync.RWMutex relayRetriesManager *lavaprotocol.RelayRetriesManager + initialized atomic.Bool } type relayResponse struct { @@ -166,8 +169,11 @@ func (rpccs *RPCConsumerServer) sendCraftedRelaysWrapper(initialRelays bool) (bo // Only start after everything is initialized - check consumer session manager rpccs.waitForPairing() } - - return rpccs.sendCraftedRelays(MaxRelayRetries, initialRelays) + success, err := rpccs.sendCraftedRelays(MaxRelayRetries, initialRelays) + if success { + rpccs.initialized.Store(true) + } + return success, err } func (rpccs *RPCConsumerServer) waitForPairing() { @@ -1551,6 +1557,32 @@ func (rpccs *RPCConsumerServer) IsHealthy() bool { return rpccs.relaysMonitor.IsHealthy() } +func (rpccs *RPCConsumerServer) IsInitialized() bool { + if rpccs == nil { + return false + } + + return rpccs.initialized.Load() +} + +func (rpccs *RPCConsumerServer) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + guid := utils.GenerateUniqueIdentifier() + ctx = utils.WithUniqueIdentifier(ctx, guid) + url, data, connectionType, metadata, err := rpccs.chainParser.ExtractDataFromRequest(req) + if err != nil { + return nil, err + } + relayResult, err := rpccs.SendRelay(ctx, url, data, connectionType, "", "", nil, metadata) + if err != nil { + return nil, err + } + resp, err := rpccs.chainParser.SetResponseFromRelayResult(relayResult) + rpccs.rpcConsumerLogs.SetLoLResponse(err == nil) 
+ return resp, err +} + func (rpccs *RPCConsumerServer) updateProtocolMessageIfNeededWithNewEarliestData( ctx context.Context, relayState *RelayState, diff --git a/protocol/statetracker/state_tracker.go b/protocol/statetracker/state_tracker.go index a87ecbf8b7..6a119b3ca3 100644 --- a/protocol/statetracker/state_tracker.go +++ b/protocol/statetracker/state_tracker.go @@ -18,12 +18,14 @@ import ( const ( BlocksToSaveLavaChainTracker = 1 // we only need the latest block TendermintConsensusParamsQuery = "consensus_params" + MAINNET_SPEC = "LAVA" + TESTNET_SPEC = "LAV1" ) var ( lavaSpecName = "" // TODO: add a governance param change that indicates what spec id belongs to lava. - lavaSpecOptions = []string{"LAV1", "LAVA"} + LavaSpecOptions = []string{TESTNET_SPEC, MAINNET_SPEC} ) // ConsumerStateTracker CSTis a class for tracking consumer data from the lava blockchain, such as epoch changes. @@ -68,7 +70,7 @@ func GetLavaSpecWithRetry(ctx context.Context, specQueryClient spectypes.QueryCl var err error for i := 0; i < updaters.BlockResultRetry; i++ { if lavaSpecName == "" { // spec name is not initialized, try fetching specs. - for _, specId := range lavaSpecOptions { + for _, specId := range LavaSpecOptions { specResponse, err = specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ ChainID: specId, }) @@ -195,3 +197,12 @@ func (st *StateTracker) RegisterForUpdates(ctx context.Context, updater Updater) func (st *StateTracker) GetEventTracker() *updaters.EventTracker { return st.EventTracker } + +func IsLavaNativeSpec(checked string) bool { + for _, nativeLavaChain := range LavaSpecOptions { + if checked == nativeLavaChain { + return true + } + } + return false +} diff --git a/scripts/test/vote_test.sh b/scripts/test/vote_test.sh new file mode 100755 index 0000000000..79f5a22377 --- /dev/null +++ b/scripts/test/vote_test.sh @@ -0,0 +1,31 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source $__dir/../useful_commands.sh +. 
${__dir}/vars/variables.sh +# Making sure old screens are not running +echo "current vote number $(latest_vote)" +killall screen +screen -wipe +GASPRICE="0.00002ulava" + +delegate_amount=1000000000000ulava +delegate_amount_big=49000000000000ulava +operator=$(lavad q staking validators --output json | jq -r ".validators[0].operator_address") +echo "operator: $operator" +lavad tx staking delegate $operator $delegate_amount --from bob --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount --from user1 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount --from user2 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount_big --from user3 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +lavad tx staking delegate $operator $delegate_amount_big --from user4 --chain-id lava --gas-prices $GASPRICE --gas-adjustment 1.5 --gas auto -y +wait_count_blocks 1 +lavad tx gov submit-legacy-proposal plans-add ./cookbook/plans/test_plans/default.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +echo; echo "#### Waiting 2 blocks ####" +wait_count_blocks 2 +# voting abstain with 50% of the voting power, yes with 2% of the voting power no with 1% of the voting power +lavad tx gov vote $(latest_vote) abstain -y --from user3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) yes -y --from user2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) yes -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote $(latest_vote) no -y --from bob --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +echo "latest vote: $(latest_vote)" +lavad q gov proposal $(latest_vote) \ No newline at end of file From 1b12d07c7c102d949442a0786283c4780efea700 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:13:48 +0200 Subject: [PATCH 14/18] Remove unused flag (#1815) --- protocol/chainlib/chain_router.go | 5 +---- protocol/chainlib/chain_router_test.go | 8 -------- protocol/chainlib/chainlib.go | 5 ----- protocol/rpcprovider/rpcprovider.go | 1 - 4 files changed, 1 insertion(+), 18 deletions(-) diff --git a/protocol/chainlib/chain_router.go b/protocol/chainlib/chain_router.go index 7a00cf5b94..9c07a6bdbb 100644 --- a/protocol/chainlib/chain_router.go +++ b/protocol/chainlib/chain_router.go @@ -326,14 +326,11 @@ func newChainRouter(ctx context.Context, nConns uint, rpcProviderEndpoint lavase } } if hasSubscriptionInSpec && apiCollection.Enabled && !webSocketSupported { - err := utils.LavaFormatError("subscriptions are applicable for this chain, but websocket is not provided in 'supported' map. By not setting ws/wss your provider wont be able to accept ws subscriptions, therefore might receive less rewards and lower QOS score.", nil, + return nil, utils.LavaFormatError("subscriptions are applicable for this chain, but websocket is not provided in 'supported' map. 
By not setting ws/wss your provider wont be able to accept ws subscriptions, therefore might receive less rewards and lower QOS score.", nil, utils.LogAttr("apiInterface", apiCollection.CollectionData.ApiInterface), utils.LogAttr("supportedMap", supportedMap), utils.LogAttr("required", WebSocketExtension), ) - if !IgnoreSubscriptionNotConfiguredError { - return nil, err - } } utils.LavaFormatDebug("router keys", utils.LogAttr("chainProxyRouter", chainProxyRouter)) diff --git a/protocol/chainlib/chain_router_test.go b/protocol/chainlib/chain_router_test.go index aefdc84b1c..7d80f343a1 100644 --- a/protocol/chainlib/chain_router_test.go +++ b/protocol/chainlib/chain_router_test.go @@ -40,8 +40,6 @@ func TestChainRouterWithDisabledWebSocketInSpec(t *testing.T) { chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -400,8 +398,6 @@ func TestChainRouterWithEnabledWebSocketInSpec(t *testing.T) { chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -795,8 +791,6 @@ func TestChainRouterWithMethodRoutes(t *testing.T) { chainParser, err := NewChainParser(apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - addonsOptions := []string{"-addon-", "-addon2-"} extensionsOptions := []string{"-test-", "-test2-", "-test3-"} @@ -2181,8 +2175,6 @@ func TestChainRouterWithInternalPaths(t *testing.T) { chainParser, err := NewChainParser(play.apiInterface) require.NoError(t, err) - IgnoreSubscriptionNotConfiguredError = false - spec := testcommon.CreateMockSpec() spec.ApiCollections = play.specApiCollections chainParser.SetSpec(spec) diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 5c3fbd9b7f..41c024ae04 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -21,11 +21,6 @@ const ( INTERNAL_ADDRESS = "internal-addr" ) -var ( - IgnoreSubscriptionNotConfiguredError = true - IgnoreSubscriptionNotConfiguredErrorFlag = "ignore-subscription-not-configured-error" -) - func NewChainParser(apiInterface string) (chainParser ChainParser, err error) { switch apiInterface { case spectypes.APIInterfaceJsonRPC: diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 794b64f295..66d99e2519 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -829,7 +829,6 @@ rpcprovider 127.0.0.1:3333 OSMOSIS tendermintrpc "wss://www.node-path.com:80,htt cmdRPCProvider.Flags().Duration(common.RelayHealthIntervalFlag, RelayHealthIntervalFlagDefault, "interval between relay health checks") cmdRPCProvider.Flags().String(HealthCheckURLPathFlagName, HealthCheckURLPathFlagDefault, "the url path for the provider's grpc health check") cmdRPCProvider.Flags().DurationVar(&updaters.TimeOutForFetchingLavaBlocks, common.TimeOutForFetchingLavaBlocksFlag, time.Second*5, "setting the timeout for fetching lava blocks") - cmdRPCProvider.Flags().BoolVar(&chainlib.IgnoreSubscriptionNotConfiguredError, chainlib.IgnoreSubscriptionNotConfiguredErrorFlag, chainlib.IgnoreSubscriptionNotConfiguredError, "ignore webSocket node url not configured error, when subscription is enabled in spec") 
cmdRPCProvider.Flags().IntVar(&numberOfRetriesAllowedOnNodeErrors, common.SetRelayCountOnNodeErrorFlag, 2, "set the number of retries attempt on node errors") cmdRPCProvider.Flags().String(common.UseStaticSpecFlag, "", "load offline spec provided path to spec file, used to test specs before they are proposed on chain, example for spec with inheritance: --use-static-spec ./cookbook/specs/ibc.json,./cookbook/specs/tendermint.json,./cookbook/specs/cosmossdk.json,./cookbook/specs/ethermint.json,./cookbook/specs/ethereum.json,./cookbook/specs/evmos.json") cmdRPCProvider.Flags().Uint64(common.RateLimitRequestPerSecondFlag, 0, "Measuring the load relative to this number for feedback - per second - per chain - default unlimited. Given Y simultaneous relay calls, a value of X and will measure Y/X load rate.") From 100ba7ebd70f56cbb2282a7fe7c74330e5a8fc84 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:35:37 +0200 Subject: [PATCH 15/18] Fix solana subscription (#1816) --- cookbook/specs/solana.json | 151 +++++++++++------- .../chainlib/chainproxy/rpcclient/handler.go | 24 ++- .../chainlib/chainproxy/rpcclient/json.go | 13 +- .../chainproxy/rpcclient/subscription.go | 2 +- 4 files changed, 119 insertions(+), 71 deletions(-) diff --git a/cookbook/specs/solana.json b/cookbook/specs/solana.json index 5089de1331..07d746c62f 100755 --- a/cookbook/specs/solana.json +++ b/cookbook/specs/solana.json @@ -964,7 +964,91 @@ "stateful": 0 }, "extra_compute_units": 0 + } + ], + "headers": [], + "inheritance_apis": [], + "parse_directives": [ + { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getLatestBlockhash\",\"params\":[{\"commitment\":\"finalized\"}],\"id\":1}", + "function_tag": "GET_BLOCKNUM", + "result_parsing": { + "parser_arg": [ + "0", + "context", + "slot" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getLatestBlockhash" }, + { + "function_tag": "GET_BLOCK_BY_NUM", + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getBlock\",\"params\":[%d,{\"transactionDetails\":\"none\",\"rewards\":false}],\"id\":1}", + "result_parsing": { + "parser_arg": [ + "0", + "blockhash" + ], + "parser_func": "PARSE_CANONICAL", + "encoding": "base64" + }, + "api_name": "getBlock" + } + ], + "verifications": [ + { + "name": "version", + "parse_directive": { + "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getVersion\",\"params\":[],\"id\":1}", + "function_tag": "VERIFICATION", + "result_parsing": { + "parser_arg": [ + "0", + "solana-core" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getVersion" + }, + "values": [ + { + "expected_value": "*" + } + ] + }, + { + "name": "tokens-owner-indexed", + "parse_directive": { + "function_template": "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenAccountsByOwner\",\"params\":[\"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F\",{\"programId\":\"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA\"},{\"encoding\":\"jsonParsed\"}]}", + "function_tag": "VERIFICATION", + "result_parsing": { + "parser_arg": [ + "0", + "value" + ], + "parser_func": "PARSE_CANONICAL" + }, + "api_name": "getTokenAccountsByOwner" + }, + "values": [ + { + "expected_value": "*", + "severity": "Warning" + } + ] + } + ] + }, + { + "enabled": true, + "collection_data": { + "api_interface": "jsonrpc", + "internal_path": "/ws", + "type": "POST", + "add_on": "" + }, + "apis": [ { "name": "accountSubscribe", "block_parsing": { @@ -1294,30 +1378,17 @@ "inheritance_apis": [], "parse_directives": [ { - 
"function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getLatestBlockhash\",\"params\":[{\"commitment\":\"finalized\"}],\"id\":1}", "function_tag": "GET_BLOCKNUM", "result_parsing": { - "parser_arg": [ - "0", - "context", - "slot" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getLatestBlockhash" + "parser_func": "DEFAULT" + } }, { + "function_template": "%d", "function_tag": "GET_BLOCK_BY_NUM", - "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getBlock\",\"params\":[%d,{\"transactionDetails\":\"none\",\"rewards\":false}],\"id\":1}", "result_parsing": { - "parser_arg": [ - "0", - "blockhash" - ], - "parser_func": "PARSE_CANONICAL", - "encoding": "base64" - }, - "api_name": "getBlock" + "parser_func": "DEFAULT" + } }, { "function_tag": "SUBSCRIBE", @@ -1401,49 +1472,7 @@ "api_name": "voteUnsubscribe" } ], - "verifications": [ - { - "name": "version", - "parse_directive": { - "function_template": "{\"jsonrpc\":\"2.0\",\"method\":\"getVersion\",\"params\":[],\"id\":1}", - "function_tag": "VERIFICATION", - "result_parsing": { - "parser_arg": [ - "0", - "solana-core" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getVersion" - }, - "values": [ - { - "expected_value": "*" - } - ] - }, - { - "name": "tokens-owner-indexed", - "parse_directive": { - "function_template": "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenAccountsByOwner\",\"params\":[\"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F\",{\"programId\":\"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA\"},{\"encoding\":\"jsonParsed\"}]}", - "function_tag": "VERIFICATION", - "result_parsing": { - "parser_arg": [ - "0", - "value" - ], - "parser_func": "PARSE_CANONICAL" - }, - "api_name": "getTokenAccountsByOwner" - }, - "values": [ - { - "expected_value": "*", - "severity": "Warning" - } - ] - } - ] + "verifications": [] } ] }, diff --git a/protocol/chainlib/chainproxy/rpcclient/handler.go b/protocol/chainlib/chainproxy/rpcclient/handler.go index bb1aa31199..acedfd97d9 100755 --- a/protocol/chainlib/chainproxy/rpcclient/handler.go +++ b/protocol/chainlib/chainproxy/rpcclient/handler.go @@ -237,13 +237,16 @@ func (h *handler) handleImmediate(msg *JsonrpcMessage) bool { h.handleSubscriptionResultTendermint(msg) return true case msg.isEthereumNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { + if strings.HasSuffix(msg.Method, ethereumNotificationMethodSuffix) { h.handleSubscriptionResultEthereum(msg) return true + } else if strings.HasSuffix(msg.Method, solanaNotificationMethodSuffix) { + h.handleSubscriptionResultSolana(msg) + return true } return false case msg.isStarkNetPathfinderNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { + if strings.HasSuffix(msg.Method, ethereumNotificationMethodSuffix) { h.handleSubscriptionResultStarkNetPathfinder(msg) return true } @@ -258,7 +261,7 @@ func (h *handler) handleImmediate(msg *JsonrpcMessage) bool { } func (h *handler) handleSubscriptionResultStarkNetPathfinder(msg *JsonrpcMessage) { - var result starkNetPathfinderSubscriptionResult + var result integerIdSubscriptionResult if err := json.Unmarshal(msg.Result, &result); err != nil { utils.LavaFormatTrace("Dropping invalid starknet pathfinder subscription message", utils.LogAttr("err", err), @@ -290,6 +293,21 @@ func (h *handler) handleSubscriptionResultEthereum(msg *JsonrpcMessage) { } } +func (h *handler) handleSubscriptionResultSolana(msg *JsonrpcMessage) { + var result integerIdSubscriptionResult + if err := json.Unmarshal(msg.Params, &result); err != nil { + 
utils.LavaFormatTrace("Dropping invalid solana subscription message", + utils.LogAttr("err", err), + utils.LogAttr("params", string(msg.Params)), + ) + h.log.Debug("Dropping invalid subscription message") + return + } + if h.clientSubs[strconv.Itoa(result.ID)] != nil { + h.clientSubs[strconv.Itoa(result.ID)].deliver(msg) + } +} + func (h *handler) handleSubscriptionResultTendermint(msg *JsonrpcMessage) { var result tendermintSubscriptionResult if err := json.Unmarshal(msg.Result, &result); err != nil { diff --git a/protocol/chainlib/chainproxy/rpcclient/json.go b/protocol/chainlib/chainproxy/rpcclient/json.go index 794ad3ebe4..84ab2e0a6e 100755 --- a/protocol/chainlib/chainproxy/rpcclient/json.go +++ b/protocol/chainlib/chainproxy/rpcclient/json.go @@ -33,11 +33,12 @@ import ( ) const ( - Vsn = "2.0" - serviceMethodSeparator = "_" - subscribeMethodSuffix = "_subscribe" - unsubscribeMethodSuffix = "_unsubscribe" - notificationMethodSuffix = "_subscription" + Vsn = "2.0" + serviceMethodSeparator = "_" + subscribeMethodSuffix = "_subscribe" + unsubscribeMethodSuffix = "_unsubscribe" + ethereumNotificationMethodSuffix = "_subscription" + solanaNotificationMethodSuffix = "Notification" defaultWriteTimeout = 10 * time.Second // used if context has no deadline ) @@ -49,7 +50,7 @@ type ethereumSubscriptionResult struct { Result json.RawMessage `json:"result,omitempty"` } -type starkNetPathfinderSubscriptionResult struct { +type integerIdSubscriptionResult struct { ID int `json:"subscription"` Result json.RawMessage `json:"result,omitempty"` } diff --git a/protocol/chainlib/chainproxy/rpcclient/subscription.go b/protocol/chainlib/chainproxy/rpcclient/subscription.go index 803ecc171c..cc882364ac 100755 --- a/protocol/chainlib/chainproxy/rpcclient/subscription.go +++ b/protocol/chainlib/chainproxy/rpcclient/subscription.go @@ -181,7 +181,7 @@ func (n *Notifier) send(sub *Subscription, data json.RawMessage) error { ctx := context.Background() return n.h.conn.writeJSON(ctx, &JsonrpcMessage{ Version: Vsn, - Method: n.namespace + notificationMethodSuffix, + Method: n.namespace + ethereumNotificationMethodSuffix, Params: params, }) } From 70bfa01558954c89f27f3d2eb681825adf08ea86 Mon Sep 17 00:00:00 2001 From: Omer <100387053+omerlavanet@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:36:45 +0200 Subject: [PATCH 16/18] chore: added utils to test method routing (#1780) * added utils to test method routing * added osmosis to policy --- ... 
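[Illustrative note on the rpcclient subscription-routing change above, patch 15: Ethereum-style providers push subscription updates with a method ending in "_subscription" (e.g. eth_subscription), while Solana-style providers use methods ending in "Notification" (e.g. slotNotification) and carry an integer subscription id inside params. The following is a minimal, self-contained Go sketch of that dispatch; the jsonrpcMessage type and the sample payloads are simplified stand-ins and not the real rpcclient structs, while the suffix constants and integerIdSubscriptionResult mirror the definitions added in json.go.]

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Simplified stand-in for the rpcclient JSON-RPC message type (illustration only).
type jsonrpcMessage struct {
	Method string          `json:"method"`
	Params json.RawMessage `json:"params"`
}

// Mirrors the suffix constants introduced in json.go.
const (
	ethereumNotificationMethodSuffix = "_subscription"
	solanaNotificationMethodSuffix   = "Notification"
)

// Mirrors integerIdSubscriptionResult: Solana (and StarkNet pathfinder) report an
// integer subscription id rather than a string one.
type integerIdSubscriptionResult struct {
	ID     int             `json:"subscription"`
	Result json.RawMessage `json:"result,omitempty"`
}

// routeNotification shows the suffix-based dispatch; the real handler also checks
// the notification "shape" before looking at the suffix.
func routeNotification(msg jsonrpcMessage) string {
	switch {
	case strings.HasSuffix(msg.Method, ethereumNotificationMethodSuffix):
		return "ethereum-style subscription update"
	case strings.HasSuffix(msg.Method, solanaNotificationMethodSuffix):
		var result integerIdSubscriptionResult
		if err := json.Unmarshal(msg.Params, &result); err != nil {
			return "dropped: invalid solana notification"
		}
		return fmt.Sprintf("solana-style update for subscription %d", result.ID)
	default:
		return "not a notification"
	}
}

func main() {
	// Hypothetical Solana slot notification payload.
	solana := jsonrpcMessage{
		Method: "slotNotification",
		Params: json.RawMessage(`{"subscription": 0, "result": {"slot": 12345}}`),
	}
	fmt.Println(routeNotification(solana)) // solana-style update for subscription 0

	eth := jsonrpcMessage{Method: "eth_subscription", Params: json.RawMessage(`{}`)}
	fmt.Println(routeNotification(eth)) // ethereum-style subscription update
}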
=> lava_example_archive_method_route.yml} | 8 ++++---- .../policy_all_chains_with_extension.yml | 20 +++++++++++++++++++ scripts/init_chain_commands.sh | 6 +++++- scripts/test/httpServer.py | 9 +++++++-- 4 files changed, 36 insertions(+), 7 deletions(-) rename config/provider_examples/{lava_example_archive_methodroute.yml => lava_example_archive_method_route.yml} (84%) diff --git a/config/provider_examples/lava_example_archive_methodroute.yml b/config/provider_examples/lava_example_archive_method_route.yml similarity index 84% rename from config/provider_examples/lava_example_archive_methodroute.yml rename to config/provider_examples/lava_example_archive_method_route.yml index e8cbf3bad9..5e4f76b9dc 100644 --- a/config/provider_examples/lava_example_archive_methodroute.yml +++ b/config/provider_examples/lava_example_archive_method_route.yml @@ -2,14 +2,14 @@ endpoints: - api-interface: tendermintrpc chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - url: ws://127.0.0.1:26657/websocket - url: http://127.0.0.1:26657 - url: http://127.0.0.1:26657 addons: - archive - - url: https://trustless-api.com + - url: http://127.0.0.1:4444 methods: - block - block_by_hash @@ -18,7 +18,7 @@ endpoints: - api-interface: grpc chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - url: 127.0.0.1:9090 - url: 127.0.0.1:9090 @@ -27,7 +27,7 @@ endpoints: - api-interface: rest chain-id: LAV1 network-address: - address: "127.0.0.1:2220" + address: "127.0.0.1:2224" node-urls: - url: http://127.0.0.1:1317 - url: http://127.0.0.1:1317 diff --git a/cookbook/projects/policy_all_chains_with_extension.yml b/cookbook/projects/policy_all_chains_with_extension.yml index 491e9bd047..59c25bcb00 100644 --- a/cookbook/projects/policy_all_chains_with_extension.yml +++ b/cookbook/projects/policy_all_chains_with_extension.yml @@ -110,6 +110,26 @@ Policy: extensions: - "archive" mixed: true + - chain_id: OSMOSIS + requirements: + - collection: + api_interface: "rest" + type: "GET" + extensions: + - "archive" + mixed: true + - collection: + api_interface: "grpc" + type: "" + extensions: + - "archive" + mixed: true + - collection: + api_interface: "tendermintrpc" + type: "" + extensions: + - "archive" + mixed: true - chain_id: COSMOSHUB requirements: - collection: diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index 295f201531..75faa7345d 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -52,6 +52,7 @@ PROVIDERSTAKE="500000000000ulava" PROVIDER1_LISTENER="127.0.0.1:2221" PROVIDER2_LISTENER="127.0.0.1:2222" PROVIDER3_LISTENER="127.0.0.1:2223" +# PROVIDER4_LISTENER="127.0.0.1:2224" sleep 4 @@ -67,7 +68,7 @@ lavad tx gov vote $(latest_vote) yes -y --from alice --gas-adjustment "1.5" --ga echo; echo "#### Buy DefaultPlan subscription for user1 ####" lavad tx subscription buy DefaultPlan $(lavad keys show user1 -a) --enable-auto-renewal -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # wait_count_blocks 2 -# lavad tx project set-policy $(lavad keys show user1 -a)-admin ./cookbook/projects/policy_all_chains_with_addon.yml -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +# lavad tx project set-policy $(lavad keys show user1 -a)-admin ./cookbook/projects/policy_all_chains_with_extension.yml -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # MANTLE 
CHAINS="ETH1,SEP1,HOL1,OSMOSIS,FTM250,CELO,LAV1,OSMOSIST,ALFAJORES,ARB1,ARBN,APT1,STRK,JUN1,COSMOSHUB,POLYGON1,EVMOS,OPTM,BASES,CANTO,SUIT,SOLANA,BSC,AXELAR,AVAX,FVM,NEAR,SQDSUBGRAPH,AGR,AGRT,KOIIT,AVAXT,CELESTIATM" @@ -82,6 +83,9 @@ lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER2_LIS echo; echo "#### Staking provider 3 ####" lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER3_LISTENER,1" 1 $(operator_address) -y --delegate-commission 50 --from servicer3 --provider-moniker "servicer3" --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +# echo; echo "#### Staking provider 4 ####" +# lavad tx pairing bulk-stake-provider $BASE_CHAINS $PROVIDERSTAKE "$PROVIDER4_LISTENER,1" 1 $(operator_address) -y --delegate-commission 50 --from servicer4 --provider-moniker "servicer4" --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + echo; echo "#### Waiting 1 block ####" wait_count_blocks 1 diff --git a/scripts/test/httpServer.py b/scripts/test/httpServer.py index 94358ac9d6..ddbe4e77a4 100644 --- a/scripts/test/httpServer.py +++ b/scripts/test/httpServer.py @@ -1,6 +1,8 @@ from http.server import BaseHTTPRequestHandler, HTTPServer import sys +payload_ret = "OK" + class RequestHandler(BaseHTTPRequestHandler): def do_GET(self): self.print_request() @@ -26,10 +28,11 @@ def print_request(self): print(f"Body:\n{body.decode('utf-8')}") # Send a response back to the client + response = payload_ret.encode('utf-8') self.send_response(200) - self.send_header("Content-type", "text/html") + self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(b"OK") + self.wfile.write(response) def run_server(port=8000): server_address = ('', port) @@ -40,6 +43,8 @@ def run_server(port=8000): if __name__ == '__main__': if len(sys.argv) > 1: port = int(sys.argv[1]) + if len(sys.argv) > 2: + payload_ret = sys.argv[2] run_server(port) else: run_server() \ No newline at end of file From 064945d91d504f6127943878a39936803ae430ba Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:29:59 +0200 Subject: [PATCH 17/18] Rename leftovers of "portal" to "connsumer" (#1820) --- protocol/integration/protocol_test.go | 2 +- protocol/rpcconsumer/rpcconsumer_server.go | 2 +- scripts/pre_setups/init_eth_archive_mix.sh | 2 +- scripts/setup_providers.sh | 4 ++-- scripts/test/jail_provider_test.sh | 4 ++-- scripts/test_spec_full.sh | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/protocol/integration/protocol_test.go b/protocol/integration/protocol_test.go index a0ed5126d9..a5b2b86f84 100644 --- a/protocol/integration/protocol_test.go +++ b/protocol/integration/protocol_test.go @@ -2154,7 +2154,7 @@ func TestArchiveProvidersRetryOnParsedHash(t *testing.T) { ChainId: specId, SeenBlock: 1005, BlocksHashesToHeights: []*pairingtypes.BlockHashToHeight{{Hash: blockHash, Height: spectypes.NOT_APPLICABLE}}, - }) // caching in the portal doesn't care about hashes, and we don't have data on finalization yet + }) // caching in the consumer doesn't care about hashes, and we don't have data on finalization yet cancel() if err != nil { continue diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index a1c6e55823..a774f0e1da 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -632,7 +632,7 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( SharedStateId: sharedStateId, 
SeenBlock: protocolMessage.RelayPrivateData().SeenBlock, BlocksHashesToHeights: rpccs.newBlocksHashesToHeightsSliceFromRequestedBlockHashes(protocolMessage.GetRequestedBlocksHashes()), - }) // caching in the portal doesn't care about hashes, and we don't have data on finalization yet + }) // caching in the consumer doesn't care about hashes, and we don't have data on finalization yet cancel() reply := cacheReply.GetReply() diff --git a/scripts/pre_setups/init_eth_archive_mix.sh b/scripts/pre_setups/init_eth_archive_mix.sh index 2d9291a578..134dfcb21e 100755 --- a/scripts/pre_setups/init_eth_archive_mix.sh +++ b/scripts/pre_setups/init_eth_archive_mix.sh @@ -67,7 +67,7 @@ screen -d -m -S provider$i bash -c "source ~/.bashrc; lavap rpcprovider \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer$i --chain-id lava 2>&1 | tee $LOGS_DIR/PROVIDER$i.log" && sleep 0.25 screen -d -m -S portals bash -c "source ~/.bashrc; lavap rpcconsumer consumer_examples/ethereum_example.yml\ -$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/CONSUMER.log" && sleep 0.25 echo "--- setting up screens done ---" screen -ls diff --git a/scripts/setup_providers.sh b/scripts/setup_providers.sh index a3b5fa9b0d..127640da41 100755 --- a/scripts/setup_providers.sh +++ b/scripts/setup_providers.sh @@ -100,9 +100,9 @@ $EXTRA_PROVIDER_FLAGS --geolocation "$GEOLOCATION" --log_level debug --from serv # $PROVIDER3_LISTENER MANTLE jsonrpc '$MANTLE_JRPC' \ echo; echo "#### Starting consumer ####" -# Setup Portal +# Setup Consumer screen -d -m -S portals bash -c "source ~/.bashrc; lavap rpcconsumer consumer_examples/full_consumer_example.yml\ -$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation "$GEOLOCATION" --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --strategy distributed 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --cache-be "127.0.0.1:7778" --geolocation "$GEOLOCATION" --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --strategy distributed 2>&1 | tee $LOGS_DIR/CONSUMER.log" && sleep 0.25 # 127.0.0.1:3385 MANTLE jsonrpc \ echo "--- setting up screens done ---" diff --git a/scripts/test/jail_provider_test.sh b/scripts/test/jail_provider_test.sh index 878754b542..f190e70376 100755 --- a/scripts/test/jail_provider_test.sh +++ b/scripts/test/jail_provider_test.sh @@ -74,11 +74,11 @@ $PROVIDER4_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ $PROVIDER4_LISTENER LAV1 grpc '$LAVA_GRPC' \ $EXTRA_PROVIDER_FLAGS --chain-id=lava --metrics-listen-address ":7780" --geolocation 1 --log_level debug --from servicer4 2>&1 | tee $LOGS_DIR/PROVIDER4.log" -# Setup Portal +# Setup Consumer screen -d -m -S portals bash -c "source ~/.bashrc; lava-protocol rpcconsumer \ 127.0.0.1:3333 ETH1 jsonrpc \ 127.0.0.1:3360 LAV1 rest 127.0.0.1:3361 LAV1 tendermintrpc 127.0.0.1:3362 LAV1 grpc \ -$EXTRA_PORTAL_FLAGS --metrics-listen-address ":7779" --geolocation 1 --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/PORTAL.log" +$EXTRA_PORTAL_FLAGS --metrics-listen-address ":7779" --geolocation 1 --log_level debug --from user1 
--chain-id lava --allow-insecure-provider-dialing 2>&1 | tee $LOGS_DIR/CONSUMER.log" # need to wait 8 epochs for the provider to be jail eligible diff --git a/scripts/test_spec_full.sh b/scripts/test_spec_full.sh index 9971f9959c..cd504f47ec 100755 --- a/scripts/test_spec_full.sh +++ b/scripts/test_spec_full.sh @@ -206,7 +206,7 @@ done echo "[+]generated consumer config: $output_consumer_yaml" cat $output_consumer_yaml if [ "$dry" = false ]; then - screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer testutil/debugging/logs/consumer.yml $EXTRA_PORTAL_FLAGS --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/PORTAL.log" + screen -d -m -S consumers bash -c "source ~/.bashrc; lavap rpcconsumer testutil/debugging/logs/consumer.yml $EXTRA_PORTAL_FLAGS --geolocation 1 --debug-relays --log_level debug --from user1 --chain-id lava --allow-insecure-provider-dialing --metrics-listen-address ":7779" 2>&1 | tee $LOGS_DIR/CONSUMER.log" echo "[+] letting providers start and running health check then running command with flags: $test_consumer_command_args" sleep 10 From e9cfc7d16eb23cc771863104999d38237db94266 Mon Sep 17 00:00:00 2001 From: Elad Gildnur <6321801+shleikes@users.noreply.github.com> Date: Tue, 3 Dec 2024 14:16:50 +0200 Subject: [PATCH 18/18] feat: PRT - Add the consumer address to the QoS report (#1819) * Add the consumer address to the QoS report * Test fix --------- Co-authored-by: Ran Mishael <106548467+ranlavanet@users.noreply.github.com> --- .../metrics/consumer_optimizer_qos_client.go | 18 +++--- .../provider_optimizer_test.go | 2 +- protocol/rpcconsumer/rpcconsumer.go | 55 ++++++++++++------- 3 files changed, 46 insertions(+), 29 deletions(-) diff --git a/protocol/metrics/consumer_optimizer_qos_client.go b/protocol/metrics/consumer_optimizer_qos_client.go index 3e12e23dfc..72183853d9 100644 --- a/protocol/metrics/consumer_optimizer_qos_client.go +++ b/protocol/metrics/consumer_optimizer_qos_client.go @@ -21,9 +21,10 @@ var ( ) type ConsumerOptimizerQoSClient struct { - consumerOrigin string - queueSender *QueueSender - optimizers map[string]OptimizerInf // keys are chain ids + consumerHostname string + consumerAddress string + queueSender *QueueSender + optimizers map[string]OptimizerInf // keys are chain ids // keys are chain ids, values are maps with provider addresses as keys chainIdToProviderToRelaysCount map[string]map[string]uint64 chainIdToProviderToNodeErrorsCount map[string]map[string]uint64 @@ -49,7 +50,8 @@ type OptimizerQoSReportToSend struct { LatencyScore float64 `json:"latency_score"` GenericScore float64 `json:"generic_score"` ProviderAddress string `json:"provider"` - ConsumerOrigin string `json:"consumer"` + ConsumerHostname string `json:"consumer_hostname"` + ConsumerAddress string `json:"consumer_pub_address"` ChainId string `json:"chain_id"` NodeErrorRate float64 `json:"node_error_rate"` Epoch uint64 `json:"epoch"` @@ -69,14 +71,15 @@ type OptimizerInf interface { CalculateQoSScoresForMetrics(allAddresses []string, ignoredProviders map[string]struct{}, cu uint64, requestedBlock int64) []*OptimizerQoSReport } -func NewConsumerOptimizerQoSClient(endpointAddress string, interval ...time.Duration) *ConsumerOptimizerQoSClient { +func NewConsumerOptimizerQoSClient(consumerAddress, endpointAddress string, interval ...time.Duration) *ConsumerOptimizerQoSClient { hostname, err := os.Hostname() if err != nil { utils.LavaFormatWarning("Error 
while getting hostname for ConsumerOptimizerQoSClient", err) hostname = "unknown" + strconv.FormatUint(rand.Uint64(), 10) // random seed for different unknowns } return &ConsumerOptimizerQoSClient{ - consumerOrigin: hostname, + consumerHostname: hostname, + consumerAddress: consumerAddress, queueSender: NewQueueSender(endpointAddress, "ConsumerOptimizerQoS", nil, interval...), optimizers: map[string]OptimizerInf{}, chainIdToProviderToRelaysCount: map[string]map[string]uint64{}, @@ -130,7 +133,8 @@ func (coqc *ConsumerOptimizerQoSClient) appendOptimizerQoSReport(report *Optimiz // must be called under read lock optimizerQoSReportToSend := OptimizerQoSReportToSend{ Timestamp: time.Now(), - ConsumerOrigin: coqc.consumerOrigin, + ConsumerHostname: coqc.consumerHostname, + ConsumerAddress: coqc.consumerAddress, SyncScore: report.SyncScore, AvailabilityScore: report.AvailabilityScore, LatencyScore: report.LatencyScore, diff --git a/protocol/provideroptimizer/provider_optimizer_test.go b/protocol/provideroptimizer/provider_optimizer_test.go index fc8427a9dc..6de13de8b6 100644 --- a/protocol/provideroptimizer/provider_optimizer_test.go +++ b/protocol/provideroptimizer/provider_optimizer_test.go @@ -781,7 +781,7 @@ func TestProviderOptimizerWithOptimizerQoSClient(t *testing.T) { chainId := "dontcare" - consumerOptimizerQoSClient := metrics.NewConsumerOptimizerQoSClient(mockHttpServer.URL, 1*time.Second) + consumerOptimizerQoSClient := metrics.NewConsumerOptimizerQoSClient("lava@test", mockHttpServer.URL, 1*time.Second) consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(context.Background(), 900*time.Millisecond) providerOptimizer := NewProviderOptimizer(STRATEGY_BALANCED, TEST_AVERAGE_BLOCK_TIME, TEST_BASE_WORLD_LATENCY, 10, consumerOptimizerQoSClient, chainId) diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 94784cdd2e..dc92b3e117 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -131,6 +131,33 @@ type rpcConsumerStartOptions struct { staticProvidersList []*lavasession.RPCProviderEndpoint // define static providers as backup to lava providers } +func getConsumerAddressAndKeys(clientCtx client.Context) (sdk.AccAddress, *secp256k1.PrivateKey, error) { + keyName, err := sigs.GetKeyName(clientCtx) + if err != nil { + return nil, nil, fmt.Errorf("failed getting key name from clientCtx: %w", err) + } + + privKey, err := sigs.GetPrivKey(clientCtx, keyName) + if err != nil { + return nil, nil, fmt.Errorf("failed getting private key from key name %s: %w", keyName, err) + } + + clientKey, _ := clientCtx.Keyring.Key(keyName) + pubkey, err := clientKey.GetPubKey() + if err != nil { + return nil, nil, fmt.Errorf("failed getting public key from key name %s: %w", keyName, err) + } + + var consumerAddr sdk.AccAddress + err = consumerAddr.Unmarshal(pubkey.Address()) + if err != nil { + return nil, nil, fmt.Errorf("failed unmarshaling public address for key %s (pubkey: %v): %w", + keyName, pubkey.Address(), err) + } + + return consumerAddr, privKey, nil +} + // spawns a new RPCConsumer server with all it's processes and internals ready for communications func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOptions) (err error) { if common.IsTestMode(ctx) { @@ -139,11 +166,16 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt options.refererData.ReferrerClient = metrics.NewConsumerReferrerClient(options.refererData.Address) consumerReportsManager := 
metrics.NewConsumerReportsClient(options.analyticsServerAddresses.ReportsAddressFlag) + consumerAddr, privKey, err := getConsumerAddressAndKeys(options.clientCtx) + if err != nil { + utils.LavaFormatFatal("failed to get consumer address and keys", err) + } + consumerUsageServeManager := metrics.NewConsumerRelayServerClient(options.analyticsServerAddresses.RelayServerAddress) // start up relay server reporting var consumerOptimizerQoSClient *metrics.ConsumerOptimizerQoSClient if options.analyticsServerAddresses.OptimizerQoSAddress != "" || options.analyticsServerAddresses.OptimizerQoSListen { - consumerOptimizerQoSClient = metrics.NewConsumerOptimizerQoSClient(options.analyticsServerAddresses.OptimizerQoSAddress, metrics.OptimizerQosServerPushInterval) // start up optimizer qos client - consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(ctx, metrics.OptimizerQosServerSamplingInterval) // start up optimizer qos client + consumerOptimizerQoSClient = metrics.NewConsumerOptimizerQoSClient(consumerAddr.String(), options.analyticsServerAddresses.OptimizerQoSAddress, metrics.OptimizerQosServerPushInterval) // start up optimizer qos client + consumerOptimizerQoSClient.StartOptimizersQoSReportsCollecting(ctx, metrics.OptimizerQosServerSamplingInterval) // start up optimizer qos client } consumerMetricsManager := metrics.NewConsumerMetricsManager(metrics.ConsumerMetricsManagerOptions{ NetworkAddress: options.analyticsServerAddresses.MetricsListenAddress, @@ -179,26 +211,7 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt lavaChainFetcher.FetchLatestBlockNum(ctx) lavaChainID := options.clientCtx.ChainID - keyName, err := sigs.GetKeyName(options.clientCtx) - if err != nil { - utils.LavaFormatFatal("failed getting key name from clientCtx", err) - } - privKey, err := sigs.GetPrivKey(options.clientCtx, keyName) - if err != nil { - utils.LavaFormatFatal("failed getting private key from key name", err, utils.Attribute{Key: "keyName", Value: keyName}) - } - clientKey, _ := options.clientCtx.Keyring.Key(keyName) - pubkey, err := clientKey.GetPubKey() - if err != nil { - utils.LavaFormatFatal("failed getting public key from key name", err, utils.Attribute{Key: "keyName", Value: keyName}) - } - - var consumerAddr sdk.AccAddress - err = consumerAddr.Unmarshal(pubkey.Address()) - if err != nil { - utils.LavaFormatFatal("failed unmarshaling public address", err, utils.Attribute{Key: "keyName", Value: keyName}, utils.Attribute{Key: "pubkey", Value: pubkey.Address()}) - } // we want one provider optimizer per chain so we will store them for reuse across rpcEndpoints chainMutexes := map[string]*sync.Mutex{} for _, endpoint := range options.rpcEndpoints {
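[Illustrative note on the QoS report change in this last patch: each optimizer QoS report pushed by the consumer now identifies both the machine (consumer_hostname) and the on-chain consumer account (consumer_pub_address). The sketch below is a local, trimmed copy of the report shape used only to show what one serialized report would roughly look like; it keeps only the fields whose json tags are visible in this diff (the real struct also carries a timestamp and sync/availability scores), and every value is invented.]

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed illustration of OptimizerQoSReportToSend; json tags match the diff above.
type optimizerQoSReportExample struct {
	LatencyScore     float64 `json:"latency_score"`
	GenericScore     float64 `json:"generic_score"`
	ProviderAddress  string  `json:"provider"`
	ConsumerHostname string  `json:"consumer_hostname"`
	ConsumerAddress  string  `json:"consumer_pub_address"`
	ChainId          string  `json:"chain_id"`
	NodeErrorRate    float64 `json:"node_error_rate"`
	Epoch            uint64  `json:"epoch"`
}

func main() {
	report := optimizerQoSReportExample{
		LatencyScore:     0.95,
		GenericScore:     0.97,
		ProviderAddress:  "lava@provider-example", // invented value
		ConsumerHostname: "consumer-host-1",       // invented value
		ConsumerAddress:  "lava@consumer-example", // invented value, corresponds to consumerAddr.String()
		ChainId:          "ETH1",
		NodeErrorRate:    0,
		Epoch:            100,
	}
	out, err := json.MarshalIndent(report, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}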