{
- let mut follower_cfg = FollowerConfigBuilder::default()
- .follow_from(start_from)
- .mithril_snapshot_path(PathBuf::from(snapshot))
- .build();
-
- let follower = match Follower::connect(relay, network, follower_cfg.clone()).await {
- Ok(follower) => follower,
- Err(err) => {
- error!("Unable to bootstrap via mithril snapshot {err}. Trying network..",);
-
- // We know bootstrapping from the snapshot fails, remove path and try from network
- follower_cfg.mithril_snapshot_path = None;
- Follower::connect(relay, network, follower_cfg).await?
- },
- };
-
- Ok(follower)
-}
-
-"#;
diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/get_sync_status.cql b/catalyst-gateway/bin/src/db/index/queries/cql/get_sync_status.cql
new file mode 100644
index 0000000000..713a302b6c
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/cql/get_sync_status.cql
@@ -0,0 +1,7 @@
+-- Get all the sync status records.
+SELECT
+ end_slot,
+ start_slot,
+ sync_time,
+ node_id
+FROM sync_status;
diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/insert_sync_status.cql b/catalyst-gateway/bin/src/db/index/queries/cql/insert_sync_status.cql
new file mode 100644
index 0000000000..95aaf9fe95
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/cql/insert_sync_status.cql
@@ -0,0 +1,12 @@
+-- Insert an update to the synchronisation status table.
+INSERT INTO sync_status (
+ end_slot,
+ start_slot,
+ sync_time,
+ node_id
+) VALUES (
+ :end_slot,
+ :start_slot,
+ :sync_time,
+ :node_id
+);
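+-- Note: the named bind markers above are expected to be bound from the Rust
+-- `SyncStatusQueryParams` row type introduced in this change.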
diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql b/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql
index e74704815c..58a33ad2b3 100644
--- a/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql
@@ -1,6 +1,7 @@
+-- Update TXO Spent by the stake address
UPDATE txo_by_stake
-SET spent_slot = :spent_slot
+ SET spent_slot = :spent_slot
WHERE stake_address = :stake_address
-AND txn = :txn
-AND txo = :txo
-AND slot_no = :slot_no
+ AND txn = :txn
+ AND txo = :txo
+ AND slot_no = :slot_no
diff --git a/catalyst-gateway/bin/src/db/index/queries/mod.rs b/catalyst-gateway/bin/src/db/index/queries/mod.rs
index e34db0647e..c14eb303e7 100644
--- a/catalyst-gateway/bin/src/db/index/queries/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/queries/mod.rs
@@ -3,6 +3,7 @@
//! This improves query execution time.
pub(crate) mod staked_ada;
+pub(crate) mod sync_status;
use std::{fmt::Debug, sync::Arc};
@@ -16,6 +17,7 @@ use staked_ada::{
get_txi_by_txn_hash::GetTxiByTxnHashesQuery,
get_txo_by_stake_address::GetTxoByStakeAddressQuery, update_txo_spent::UpdateTxoSpentQuery,
};
+use sync_status::update::SyncStatusInsertQuery;
use super::block::{
certs::CertInsertQuery, cip36::Cip36InsertQuery, txi::TxiInsertQuery, txo::TxoInsertQuery,
@@ -51,7 +53,7 @@ pub(crate) enum PreparedQuery {
TxoSpentUpdateQuery,
}
-/// All prepared SELECT query statements.
+/// All prepared SELECT query statements (return data).
pub(crate) enum PreparedSelectQuery {
/// Get TXO by stake address query.
GetTxoByStakeAddress,
@@ -59,6 +61,12 @@ pub(crate) enum PreparedSelectQuery {
GetTxiByTransactionHash,
}
+/// All prepared UPSERT query statements (insert or update a single row of data).
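+///
+/// Any future single-row upsert (an assumed extension, not part of this change) would add
+/// a variant here and a matching arm in `execute_upsert` below.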
+pub(crate) enum PreparedUpsertQuery {
+ /// Sync Status Insert
+ SyncStatusInsert,
+}
+
/// All prepared queries for a session.
#[allow(clippy::struct_field_names)]
pub(crate) struct PreparedQueries {
@@ -86,6 +94,8 @@ pub(crate) struct PreparedQueries {
txo_by_stake_address_query: PreparedStatement,
/// Get TXI by transaction hash.
txi_by_txn_hash_query: PreparedStatement,
+ /// Insert Sync Status update.
+ sync_status_insert: PreparedStatement,
}
/// An individual query response that can fail
@@ -110,6 +120,7 @@ impl PreparedQueries {
UpdateTxoSpentQuery::prepare_batch(session.clone(), cfg).await;
let txo_by_stake_address_query = GetTxoByStakeAddressQuery::prepare(session.clone()).await;
let txi_by_txn_hash_query = GetTxiByTxnHashesQuery::prepare(session.clone()).await;
+ let sync_status_insert = SyncStatusInsertQuery::prepare(session).await;
let (
txo_insert_queries,
@@ -137,6 +148,7 @@ impl PreparedQueries {
txo_spent_update_queries: txo_spent_update_queries?,
txo_by_stake_address_query: txo_by_stake_address_query?,
txi_by_txn_hash_query: txi_by_txn_hash_query?,
+ sync_status_insert: sync_status_insert?,
})
}
@@ -183,6 +195,25 @@ impl PreparedQueries {
Ok(sized_batches)
}
+ /// Executes a single query with the given parameters.
+ ///
+ /// Returns no data, and an error if the query fails.
+ pub(crate) async fn execute_upsert<P>(
+ &self, session: Arc<Session>, upsert_query: PreparedUpsertQuery, params: P,
+ ) -> anyhow::Result<()>
+ where P: SerializeRow {
+ let prepared_stmt = match upsert_query {
+ PreparedUpsertQuery::SyncStatusInsert => &self.sync_status_insert,
+ };
+
+ session
+ .execute_unpaged(prepared_stmt, params)
+ .await
+ .map_err(|e| anyhow::anyhow!(e))?;
+
+ Ok(())
+ }
+
/// Executes a select query with the given parameters.
///
/// Returns an iterator that iterates over all the result pages that the query
diff --git a/catalyst-gateway/bin/src/db/index/queries/sync_status/get.rs b/catalyst-gateway/bin/src/db/index/queries/sync_status/get.rs
new file mode 100644
index 0000000000..804192462b
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/sync_status/get.rs
@@ -0,0 +1,193 @@
+//! Get Sync Status query
+
+use futures::StreamExt;
+use tracing::{debug, warn};
+
+use super::update::row::SyncStatusQueryParams;
+use crate::{db::index::session::CassandraSession, service::utilities::convert::from_saturating};
+
+/// Get Sync Status query string.
+const GET_SYNC_STATUS: &str = include_str!("../cql/get_sync_status.cql");
+
+/// Clean Sync Status Response
+#[derive(PartialEq, Debug)]
+pub(crate) struct SyncStatus {
+ /// End Slot.
+ pub(crate) end_slot: u64,
+ /// Start Slot.
+ pub(crate) start_slot: u64,
+ /// Sync Time.
+ pub(crate) sync_time: u64,
+ /// Node ID
+ pub(crate) node_id: String,
+}
+
+/// Convert a big uint to a u64, saturating if it's out of range.
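+///
+/// For example (illustrative): a negative value becomes `0`, a value that fits in 64 bits
+/// passes through unchanged, and anything wider saturates to `u64::MAX`.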
+fn big_uint_to_u64(value: &num_bigint::BigInt) -> u64 {
+ let (sign, digits) = value.to_u64_digits();
+ if sign == num_bigint::Sign::Minus || digits.is_empty() {
+ return 0;
+ }
+ if digits.len() > 1 {
+ return u64::MAX;
+ }
+ // 100% safe due to the above checks.
+ #[allow(clippy::indexing_slicing)]
+ digits[0]
+}
+
+/// Merge consecutive sync records, to make processing them easier.
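+///
+/// For example (illustrative): records covering slots `0..=12000`, `0..=12345` and
+/// `12300..=56789` collapse into a single record covering `0..=56789`.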
+fn merge_consecutive_sync_records(mut synced_chunks: Vec<SyncStatus>) -> Vec<SyncStatus> {
+ // Sort the chunks by the starting key; if the ending key overlaps, we will deal with that
+ // during the merge.
+ synced_chunks.sort_by_key(|rec| rec.start_slot);
+
+ let mut best_sync: Vec<SyncStatus> = vec![];
+ let mut current_status: Option<SyncStatus> = None;
+ for rec in synced_chunks {
+ if let Some(current) = current_status.take() {
+ if rec.start_slot >= current.start_slot && rec.end_slot <= current.end_slot {
+ // The new record is contained fully within the previous one.
+ // We will ignore the new record and use the previous one instead.
+ current_status = Some(current);
+ } else if rec.start_slot <= current.end_slot + 1 {
+ // Either overlaps, or is directly consecutive.
+ // But not fully contained within the previous one.
+ current_status = Some(SyncStatus {
+ end_slot: rec.end_slot,
+ start_slot: current.start_slot,
+ sync_time: rec.sync_time.max(current.sync_time),
+ node_id: rec.node_id,
+ });
+ } else {
+ // Not consecutive, so store it.
+ // And set a new current one.
+ best_sync.push(current);
+ current_status = Some(rec);
+ }
+ } else {
+ current_status = Some(rec);
+ }
+ }
+ // Could have the final one in current still, so store it
+ if let Some(current) = current_status.take() {
+ best_sync.push(current);
+ }
+
+ best_sync
+}
+
+/// Get the sync status.
+///
+/// Note: This only happens once when a node starts. So there is no need to prepare it.
+/// It is also only ever run on the persistent database.
+///
+/// Regarding failures:
+/// Failures of this function will simply cause the node to re-sync, which is non-fatal.
+pub(crate) async fn get_sync_status() -> Vec<SyncStatus> {
+ let mut synced_chunks: Vec<SyncStatus> = vec![];
+
+ let Some(session) = CassandraSession::get(true) else {
+ warn!("Failed to get Cassandra Session while trying to get current indexing status");
+ return synced_chunks;
+ };
+
+ // Get the raw underlying session, so we can do an unprepared simple query.
+ let session = session.get_raw_session();
+
+ let mut results = match session.query_iter(GET_SYNC_STATUS, ()).await {
+ Ok(result) => result.into_typed::<SyncStatusQueryParams>(),
+ Err(err) => {
+ warn!(error=%err, "Failed to get sync status results from query.");
+ return synced_chunks;
+ },
+ };
+
+ // Get all the sync records, and de-cassandra-ize the values
+ while let Some(next_row) = results.next().await {
+ match next_row {
+ Err(err) => warn!(error=%err, "Failed to deserialize sync status results from query."),
+ Ok(row) => {
+ debug!("Sync Status: {:?}", row);
+ synced_chunks.push(SyncStatus {
+ end_slot: big_uint_to_u64(&row.end_slot),
+ start_slot: big_uint_to_u64(&row.start_slot),
+ sync_time: from_saturating(row.sync_time.0),
+ node_id: row.node_id,
+ });
+ },
+ }
+ }
+
+ merge_consecutive_sync_records(synced_chunks)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ /// This test checks we can properly merge sync status chunks.
+ fn test_sync_merge() {
+ // Add some test records, out of order.
+ // Two mergeable groups
+ let synced_chunks: Vec<SyncStatus> = vec![
+ SyncStatus {
+ end_slot: 200_000,
+ start_slot: 112_001,
+ sync_time: 1_200_000,
+ node_id: "test-node-1".to_string(),
+ },
+ SyncStatus {
+ end_slot: 12000,
+ start_slot: 0,
+ sync_time: 100_100,
+ node_id: "test-node-1".to_string(),
+ },
+ SyncStatus {
+ end_slot: 99000,
+ start_slot: 56789,
+ sync_time: 200_000,
+ node_id: "test-node-2".to_string(),
+ },
+ SyncStatus {
+ end_slot: 112_000,
+ start_slot: 100_000,
+ sync_time: 1_100_100,
+ node_id: "test-node-1".to_string(),
+ },
+ SyncStatus {
+ end_slot: 56789,
+ start_slot: 12300,
+ sync_time: 200_000,
+ node_id: "test-node-2".to_string(),
+ },
+ SyncStatus {
+ end_slot: 12345,
+ start_slot: 0,
+ sync_time: 100_000,
+ node_id: "test-node-1".to_string(),
+ },
+ ];
+
+ let merged_syncs_status = merge_consecutive_sync_records(synced_chunks);
+
+ // Expected result
+ let expected: &[SyncStatus] = &[
+ SyncStatus {
+ end_slot: 99000,
+ start_slot: 0,
+ sync_time: 200_000,
+ node_id: "test-node-2".to_string(),
+ },
+ SyncStatus {
+ end_slot: 200_000,
+ start_slot: 100_000,
+ sync_time: 1_200_000,
+ node_id: "test-node-1".to_string(),
+ },
+ ];
+
+ assert_eq!(merged_syncs_status.as_slice(), expected);
+ }
+}
diff --git a/catalyst-gateway/bin/src/db/index/queries/sync_status/mod.rs b/catalyst-gateway/bin/src/db/index/queries/sync_status/mod.rs
new file mode 100644
index 0000000000..9f2fa40cc5
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/sync_status/mod.rs
@@ -0,0 +1,4 @@
+//! Sync status update and query.
+
+pub(crate) mod get;
+pub(crate) mod update;
diff --git a/catalyst-gateway/bin/src/db/index/queries/sync_status/update.rs b/catalyst-gateway/bin/src/db/index/queries/sync_status/update.rs
new file mode 100644
index 0000000000..03563d2f71
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/sync_status/update.rs
@@ -0,0 +1,122 @@
+//! Read and write the synchronisation status.
+
+use std::{sync::Arc, time::SystemTime};
+
+use row::SyncStatusQueryParams;
+use scylla::{frame::value::CqlTimestamp, prepared_statement::PreparedStatement, Session};
+use tokio::task;
+use tracing::{error, warn};
+
+use crate::{
+ db::index::{
+ queries::{PreparedQueries, PreparedUpsertQuery},
+ session::CassandraSession,
+ },
+ service::utilities::convert::from_saturating,
+ settings::Settings,
+};
+
+/// Insert Sync Status query string.
+const INSERT_SYNC_STATUS_QUERY: &str = include_str!("../cql/insert_sync_status.cql");
+
+/// Sync Status Row Record Module
+#[allow(clippy::expect_used)]
+pub(super) mod row {
+ use scylla::{frame::value::CqlTimestamp, FromRow, SerializeRow};
+
+ /// Sync Status Record Row (used for both Insert and Query response)
+ #[derive(SerializeRow, FromRow, Debug)]
+ pub(crate) struct SyncStatusQueryParams {
+ /// End Slot.
+ pub(crate) end_slot: num_bigint::BigInt,
+ /// Start Slot.
+ pub(crate) start_slot: num_bigint::BigInt,
+ /// Sync Time.
+ pub(crate) sync_time: CqlTimestamp,
+ /// Node ID
+ pub(crate) node_id: String,
+ }
+}
+
+impl SyncStatusQueryParams {
+ /// Create a new instance of [`SyncStatusQueryParams`]
+ pub(crate) fn new(end_slot: u64, start_slot: u64) -> Self {
+ let sync_time = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
+ Ok(now) => now.as_millis(),
+ Err(_) => 0, // Shouldn't actually happen.
+ };
+
+ Self {
+ end_slot: end_slot.into(),
+ start_slot: start_slot.into(),
+ sync_time: CqlTimestamp(from_saturating(sync_time)),
+ node_id: Settings::service_id().to_owned(),
+ }
+ }
+}
+
+/// Sync Status Insert query.
+pub(crate) struct SyncStatusInsertQuery;
+
+impl SyncStatusInsertQuery {
+ /// Prepares a Sync Status Insert query.
+ pub(crate) async fn prepare(session: Arc<Session>) -> anyhow::Result<PreparedStatement> {
+ let sync_status_insert_query = PreparedQueries::prepare(
+ session,
+ INSERT_SYNC_STATUS_QUERY,
+ scylla::statement::Consistency::All,
+ true,
+ )
+ .await;
+
+ if let Err(ref error) = sync_status_insert_query {
+ error!(error=%error, "Failed to prepare Sync Status Insert query.");
+ };
+
+ sync_status_insert_query
+ }
+
+ /// Executes a sync status insert query.
+ pub(crate) async fn execute(
+ session: &CassandraSession, params: SyncStatusQueryParams,
+ ) -> anyhow::Result<()> {
+ session
+ .execute_upsert(PreparedUpsertQuery::SyncStatusInsert, params)
+ .await
+ }
+}
+
+/// Update the sync status of the immutable database.
+///
+/// Note: There is no need to update the sync status of the volatile database.
+///
+/// Regarding failures:
+/// Failures of this function to record status fail safely.
+/// This data is only used to recover sync progress, therefore this function is
+/// fire-and-forget and returns no status.
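+/// Callers (for example the chain indexing task, which is assumed here and not shown in
+/// this change) can simply call this after a block range has been indexed; it spawns a
+/// background task, so it needs no `.await` and surfaces no errors.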
+pub(crate) fn update_sync_status(end_slot: u64, start_slot: u64) {
+ task::spawn(async move {
+ let Some(session) = CassandraSession::get(true) else {
+ warn!(
+ start_slot = start_slot,
+ end_slot = end_slot,
+ "Failed to get Cassandra Session while trying to record indexing status"
+ );
+ return;
+ };
+
+ if let Err(err) = SyncStatusInsertQuery::execute(
+ &session,
+ SyncStatusQueryParams::new(end_slot, start_slot),
+ )
+ .await
+ {
+ warn!(
+ error=%err,
+ start_slot = start_slot,
+ end_slot = end_slot,
+ "Failed to store Sync Status"
+ );
+ };
+ });
+}
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/sync_status.cql b/catalyst-gateway/bin/src/db/index/schema/cql/sync_status.cql
index 7f82d255e0..0e2494dfb3 100644
--- a/catalyst-gateway/bin/src/db/index/schema/cql/sync_status.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/sync_status.cql
@@ -5,7 +5,7 @@ CREATE TABLE IF NOT EXISTS sync_status (
end_slot varint, -- The slot that has been indexed up-to (inclusive).
start_slot varint, -- The slot the sync block started at (inclusive).
sync_time timestamp, -- The time we finished the sync.
- node_id uuid, -- The node that synced this data.
+ node_id text, -- The node that synced this data.
PRIMARY KEY (end_slot, start_slot, sync_time, node_id)
);
diff --git a/catalyst-gateway/bin/src/db/index/schema/mod.rs b/catalyst-gateway/bin/src/db/index/schema/mod.rs
index 61e2f2057b..076e886d36 100644
--- a/catalyst-gateway/bin/src/db/index/schema/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/schema/mod.rs
@@ -17,7 +17,7 @@ use crate::{settings::cassandra_db, utils::blake2b_hash::generate_uuid_string_fr
/// change accidentally, and is NOT to be used directly to set the schema version of the
/// table namespaces.
#[allow(dead_code)]
-const SCHEMA_VERSION: &str = "a0e54866-1f30-8ad2-9ac7-df1cfaf9c634";
+const SCHEMA_VERSION: &str = "10463640-3b7b-8a25-9d42-5eb64e44bd62";
/// Keyspace Create (Templated)
const CREATE_NAMESPACE_CQL: &str = include_str!("./cql/namespace.cql");
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 41c447634c..2f29c321eb 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -16,7 +16,10 @@ use tokio::fs;
use tracing::{error, info};
use super::{
- queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, PreparedSelectQuery},
+ queries::{
+ FallibleQueryResults, PreparedQueries, PreparedQuery, PreparedSelectQuery,
+ PreparedUpsertQuery,
+ },
schema::create_schema,
};
use crate::{
@@ -132,6 +135,22 @@ impl CassandraSession {
queries.execute_batch(session, cfg, query, values).await
}
+
+ /// Execute a query which returns no results, except an error if it fails.
+ /// Cannot be batched; takes a single set of parameters.
+ pub(crate) async fn execute_upsert<T: SerializeRow>(
+ &self, query: PreparedUpsertQuery, value: T,
+ ) -> anyhow::Result<()> {
+ let session = self.session.clone();
+ let queries = self.queries.clone();
+
+ queries.execute_upsert(session, query, value).await
+ }
+
+ /// Get underlying Raw Cassandra Session.
+ pub(crate) fn get_raw_session(&self) -> Arc<Session> {
+ self.session.clone()
+ }
}
/// Create a new execution profile based on the given configuration.
diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
index 5ad8f1fef2..e729f127f4 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
@@ -103,6 +103,9 @@ async fn calculate_stake_info(
}
check_and_set_spent(&session, &mut txos_by_txn).await?;
+ // TODO: This could be executed in the background; it does not actually matter if it
+ // succeeds. This is just an optimization step to reduce the need to query spent
+ // TXOs.
update_spent(&session, stake_address_bytes, &txos_by_txn).await?;
let stake_info = build_stake_info(txos_by_txn)?;
diff --git a/catalyst-gateway/bin/src/settings/chain_follower.rs b/catalyst-gateway/bin/src/settings/chain_follower.rs
index 9311f71cbc..2159e31681 100644
--- a/catalyst-gateway/bin/src/settings/chain_follower.rs
+++ b/catalyst-gateway/bin/src/settings/chain_follower.rs
@@ -16,6 +16,15 @@ const DEFAULT_SYNC_TASKS: u16 = 16;
/// Maximum number of sync tasks (must be in the range 1 to 256 inclusive.)
const MAX_SYNC_TASKS: u16 = 256;
+/// Default number of slots each sync task will process at one time.
+/// This default is just over one week's worth of data, where 1 slot == 1 second.
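+/// (700,000 slots at one second per slot is roughly 8.1 days.)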
+const DEFAULT_SYNC_MAX_SLOTS: u64 = 700_000;
+/// Minimum value the number of slots each sync task will process at one time can be set to.
+/// Note: This is just the setting minimum; a sync task may sync as few as 1 slot.
+const MIN_SYNC_MAX_SLOTS: u64 = 10_000;
+/// Maximum value the number of slots each sync task will process at one time can be set to.
+const MAX_SYNC_MAX_SLOTS: u64 = 100_000_000;
+
/// Maximum number of DL Connections (must be in the range 1 to 256 inclusive.)
const MAX_DL_CONNECTIONS: usize = 256;
@@ -40,6 +49,9 @@ pub(crate) struct EnvVars {
/// The maximum number of sync tasks.
pub(crate) sync_tasks: u16,
+ /// The maximum number of slots a sync task will process at once.
+ pub(crate) sync_chunk_max_slots: u64,
+
/// The Mithril Downloader Configuration.
pub(crate) dl_config: DlConfig,
}
@@ -55,6 +67,13 @@ impl EnvVars {
MAX_SYNC_TASKS,
);
+ let sync_slots: u64 = StringEnvVar::new_as(
+ "CHAIN_FOLLOWER_SYNC_MAX_SLOTS",
+ DEFAULT_SYNC_MAX_SLOTS,
+ MIN_SYNC_MAX_SLOTS,
+ MAX_SYNC_MAX_SLOTS,
+ );
+
let cfg = ChainSyncConfig::default_for(chain);
let mut dl_config = cfg.mithril_cfg.dl_config.clone().unwrap_or_default();
@@ -119,6 +138,7 @@ impl EnvVars {
Self {
chain,
sync_tasks,
+ sync_chunk_max_slots: sync_slots,
dl_config,
}
}
diff --git a/catalyst-gateway/event-db/Earthfile b/catalyst-gateway/event-db/Earthfile
index 1d35538d73..66ccff08b9 100644
--- a/catalyst-gateway/event-db/Earthfile
+++ b/catalyst-gateway/event-db/Earthfile
@@ -3,7 +3,7 @@
# the database and its associated software.
VERSION 0.8
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.10 AS postgresql-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.14 AS postgresql-ci
# cspell: words
diff --git a/catalyst-gateway/tests/Earthfile b/catalyst-gateway/tests/Earthfile
index 5c12cc9fa0..e902d93768 100644
--- a/catalyst-gateway/tests/Earthfile
+++ b/catalyst-gateway/tests/Earthfile
@@ -1,5 +1,5 @@
VERSION 0.8
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.10 AS spectral-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.14 AS spectral-ci
# test-lint-openapi - OpenAPI linting from an artifact
# testing whether the OpenAPI generated during build stage follows good practice.
diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile
index ebbb339d6e..aa9bd062f8 100644
--- a/catalyst-gateway/tests/api_tests/Earthfile
+++ b/catalyst-gateway/tests/api_tests/Earthfile
@@ -1,6 +1,6 @@
VERSION 0.8
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.10 AS python-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.14 AS python-ci
builder:
FROM python-ci+python-base
diff --git a/catalyst_voices/Earthfile b/catalyst_voices/Earthfile
index 0339877018..31ec489635 100644
--- a/catalyst_voices/Earthfile
+++ b/catalyst_voices/Earthfile
@@ -1,7 +1,7 @@
VERSION 0.8
IMPORT ../catalyst-gateway AS catalyst-gateway
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.10 AS flutter-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.14 AS flutter-ci
# Copy all the necessary files and running bootstrap
builder:
diff --git a/catalyst_voices/uikit_example/Earthfile b/catalyst_voices/uikit_example/Earthfile
index eed673c6ce..8f3e3cdcbe 100644
--- a/catalyst_voices/uikit_example/Earthfile
+++ b/catalyst_voices/uikit_example/Earthfile
@@ -1,7 +1,7 @@
VERSION 0.8
IMPORT ../ AS catalyst-voices
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.10 AS flutter-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.14 AS flutter-ci
# local-build-web - build web version of UIKit example.
# Prefixed by "local" to make sure it's not auto triggered, the target was
diff --git a/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile b/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile
index 3ae373157a..bad2748bd8 100644
--- a/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile
+++ b/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile
@@ -1,6 +1,6 @@
VERSION 0.8
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.10 AS flutter-ci
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/playwright:v3.2.10 AS playwright-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.14 AS flutter-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/playwright:v3.2.14 AS playwright-ci
deps:
DO playwright-ci+SETUP --workdir=/wallet-automation
diff --git a/docs/Earthfile b/docs/Earthfile
index a027fcdece..a042410367 100644
--- a/docs/Earthfile
+++ b/docs/Earthfile
@@ -1,6 +1,6 @@
VERSION 0.8
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.10 AS docs-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.14 AS docs-ci
IMPORT .. AS repo
IMPORT ../catalyst-gateway AS catalyst-gateway
diff --git a/utilities/local-scylla/justfile b/utilities/local-scylla/justfile
index 2cbf82e207..7d215b2cb3 100644
--- a/utilities/local-scylla/justfile
+++ b/utilities/local-scylla/justfile
@@ -36,7 +36,7 @@ scylla-dev-db-reset-cluster: scylla-dev-db-purge scylla-dev-db-cluster
# Run CQLSH on the dev Scylla cluster
scylla-dev-db-cqlsh:
- docker run --rm -it scylladb/scylla-cqlsh `hostname` 9043
+ docker run --rm -it scylladb/scylla-cqlsh "{{host_ip}}" 9042
# Run Nodetool on the dev Scylla cluster to dump status info.
scylla-dev-db-nodetool: