From 9c6617950a2afacff682a7c64723d224456c7acc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?H=E1=BA=A3i=20Nam?=
Date: Wed, 22 Jan 2025 11:13:32 +0700
Subject: [PATCH] fix: fix possible panic with batchSize (#703)

* fix: fix possible panic with batchSize

* chore: log if offset greater than length
---
 .../deltaswap-v1/pool_list_updater.go               | 13 ++++++++++---
 .../ether-vista/pool_list_updater.go                | 13 ++++++++++---
 pkg/liquidity-source/ringswap/pool_list_updater.go  | 13 ++++++++++---
 .../solidly-v2/pool_list_updater.go                 | 13 ++++++++++---
 pkg/liquidity-source/swap-x-v2/pool_list_updater.go | 13 ++++++++++---
 .../uniswap-v1/pool_list_updater.go                 | 13 ++++++++++---
 .../uniswap-v2/pool_list_updater.go                 | 13 ++++++++++---
 .../velodrome-v1/pool_list_updater.go               | 13 ++++++++++---
 .../velodrome-v2/pool_list_updater.go               | 13 ++++++++++---
 .../virtual-fun/pool_list_updater.go                | 13 ++++++++++---
 10 files changed, 100 insertions(+), 30 deletions(-)

diff --git a/pkg/liquidity-source/deltaswap-v1/pool_list_updater.go b/pkg/liquidity-source/deltaswap-v1/pool_list_updater.go
index ac9247bc2..ff93f9117 100644
--- a/pkg/liquidity-source/deltaswap-v1/pool_list_updater.go
+++ b/pkg/liquidity-source/deltaswap-v1/pool_list_updater.go
@@ -59,7 +59,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -255,13 +255,20 @@ func (u *PoolsListUpdater) newMetadata(newOffset int) ([]byte, error) {
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/ether-vista/pool_list_updater.go b/pkg/liquidity-source/ether-vista/pool_list_updater.go
index 56dd41150..1d9c851b0 100644
--- a/pkg/liquidity-source/ether-vista/pool_list_updater.go
+++ b/pkg/liquidity-source/ether-vista/pool_list_updater.go
@@ -62,7 +62,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -258,13 +258,20 @@ func (u *PoolsListUpdater) newMetadata(newOffset int) ([]byte, error) {
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/ringswap/pool_list_updater.go b/pkg/liquidity-source/ringswap/pool_list_updater.go
index 387653d6a..3876b591c 100644
--- a/pkg/liquidity-source/ringswap/pool_list_updater.go
+++ b/pkg/liquidity-source/ringswap/pool_list_updater.go
@@ -59,7 +59,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -310,13 +310,20 @@ func (u *PoolsListUpdater) newMetadata(newOffset int) ([]byte, error) {
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/solidly-v2/pool_list_updater.go b/pkg/liquidity-source/solidly-v2/pool_list_updater.go
index 37d75c5f1..1954ab120 100644
--- a/pkg/liquidity-source/solidly-v2/pool_list_updater.go
+++ b/pkg/liquidity-source/solidly-v2/pool_list_updater.go
@@ -66,7 +66,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
 
 	poolAddresses, err := u.listPoolAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -329,13 +329,20 @@ func (u *PoolsListUpdater) newStaticExtra(poolMetadata PoolMetadata) ([]byte, er
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/swap-x-v2/pool_list_updater.go b/pkg/liquidity-source/swap-x-v2/pool_list_updater.go
index 4b0e1c6f4..220b0b472 100644
--- a/pkg/liquidity-source/swap-x-v2/pool_list_updater.go
+++ b/pkg/liquidity-source/swap-x-v2/pool_list_updater.go
@@ -72,7 +72,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
 
 	poolAddresses, err := u.listPoolAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -342,13 +342,20 @@ func (u *PoolsListUpdater) newStaticExtra(poolMetadata velodromev2.PoolMetadata) ([]byte, er
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/uniswap-v1/pool_list_updater.go b/pkg/liquidity-source/uniswap-v1/pool_list_updater.go
index 1b9a24df3..5e18f8e09 100644
--- a/pkg/liquidity-source/uniswap-v1/pool_list_updater.go
+++ b/pkg/liquidity-source/uniswap-v1/pool_list_updater.go
@@ -65,7 +65,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(totalExchanges, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(totalExchanges, u.config.NewPoolLimit, offset)
 
 	exchanges, err := u.listExchanges(ctx, offset, batchSize)
 	if err != nil {
@@ -236,13 +236,20 @@ func (u *PoolsListUpdater) newMetadata(newOffset int) ([]byte, error) {
 	return metadataBytes, nil
 }
 
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    DexType,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/uniswap-v2/pool_list_updater.go b/pkg/liquidity-source/uniswap-v2/pool_list_updater.go
index 268a639c4..5f33c0615 100644
--- a/pkg/liquidity-source/uniswap-v2/pool_list_updater.go
+++ b/pkg/liquidity-source/uniswap-v2/pool_list_updater.go
@@ -62,7 +62,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -273,13 +273,20 @@ func (u *PoolsListUpdater) newExtra(fee uint64, feePrecision uint64) ([]byte, er
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/velodrome-v1/pool_list_updater.go b/pkg/liquidity-source/velodrome-v1/pool_list_updater.go
index 589d2ad34..1ad7efc70 100644
--- a/pkg/liquidity-source/velodrome-v1/pool_list_updater.go
+++ b/pkg/liquidity-source/velodrome-v1/pool_list_updater.go
@@ -71,7 +71,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(int(pairFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(int(pairFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -339,13 +339,20 @@ func (u *PoolsListUpdater) newExtra(pairMetadata PairMetadata, factoryData PairF
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/velodrome-v2/pool_list_updater.go b/pkg/liquidity-source/velodrome-v2/pool_list_updater.go
index e27c6c8c6..764e41798 100644
--- a/pkg/liquidity-source/velodrome-v2/pool_list_updater.go
+++ b/pkg/liquidity-source/velodrome-v2/pool_list_updater.go
@@ -71,7 +71,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(int(poolFactoryData.AllPairsLength.Int64()), u.config.NewPoolLimit, offset)
 
 	poolAddresses, err := u.listPoolAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -337,13 +337,20 @@ func (u *PoolsListUpdater) newStaticExtra(poolMetadata PoolMetadata) ([]byte, er
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    u.config.DexID,
+				"offset": offset,
+				"length": length,
+			}).Warn("[getBatchSize] offset is greater than length")
+		}
+		return max(length-offset, 0)
 	}
 
 	return limit
diff --git a/pkg/liquidity-source/virtual-fun/pool_list_updater.go b/pkg/liquidity-source/virtual-fun/pool_list_updater.go
index ffae14021..24c05fafa 100644
--- a/pkg/liquidity-source/virtual-fun/pool_list_updater.go
+++ b/pkg/liquidity-source/virtual-fun/pool_list_updater.go
@@ -63,7 +63,7 @@ func (u *PoolsListUpdater) GetNewPools(ctx context.Context, metadataBytes []byte
 			Warn("getOffset failed")
 	}
 
-	batchSize := getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
+	batchSize := u.getBatchSize(allPairsLength, u.config.NewPoolLimit, offset)
 
 	pairAddresses, err := u.listPairAddresses(ctx, offset, batchSize)
 	if err != nil {
@@ -286,13 +286,20 @@ func (u *PoolsListUpdater) newMetadata(newOffset int) ([]byte, error) {
 // @params limit number of pairs to be fetched in one run
 // @params offset index of the last pair has been fetched
 // @returns batchSize
-func getBatchSize(length int, limit int, offset int) int {
+func (u *PoolsListUpdater) getBatchSize(length int, limit int, offset int) int {
 	if offset == length {
 		return 0
 	}
 
 	if offset+limit >= length {
-		return length - offset
+		if offset > length {
+			logger.WithFields(logger.Fields{
+				"dex":    DexType,
"offset": offset, + "length": length, + }).Warn("[getBatchSize] offset is greater than length") + } + return max(length-offset, 0) } return limit