From f87562d0b1192ff7b0496494252d3de4a0aafae3 Mon Sep 17 00:00:00 2001
From: Gabriel de Quadros Ligneul <8294320+gligneul@users.noreply.github.com>
Date: Thu, 21 Sep 2023 11:01:38 -0300
Subject: [PATCH] feat!: add input status to GraphQL API

Add a new `status` field to the input object in the GraphQL API and a
new column to the Postgres `inputs` table. Node runners should clean
their database and update the GraphQL schema before upgrading.
---
 CHANGELOG.md                                   |   1 +
 .../src/server_manager/conversions.rs          |  30 ++-
 .../src/server_manager/facade.rs               |  14 +-
 .../20230921143147_completion_status/down.sql  |   5 +
 .../20230921143147_completion_status/up.sql    |  14 ++
 offchain/data/src/lib.rs                       |   4 +-
 offchain/data/src/repository.rs                |  25 ++-
 offchain/data/src/schema.rs                    |   8 +
 offchain/data/src/types.rs                     |  61 +++++-
 offchain/data/tests/repository.rs              |  27 ++-
 .../graphql-server/src/schema/resolvers.rs     |  49 ++++-
 offchain/graphql-server/tests/integration.rs   |   6 +-
 .../graphql-server/tests/queries/input.json    |   2 +-
 .../graphql-server/tests/responses/input.json  |   2 +-
 offchain/indexer/src/conversions.rs            |  29 ++-
 offchain/indexer/src/indexer.rs                |   4 +
 offchain/rollups-events/src/lib.rs             |   6 +-
 .../rollups-events/src/rollups_outputs.rs      |  19 ++
 offchain/schema.graphql                        | 205 ------------------
 19 files changed, 278 insertions(+), 233 deletions(-)
 create mode 100644 offchain/data/migrations/20230921143147_completion_status/down.sql
 create mode 100644 offchain/data/migrations/20230921143147_completion_status/up.sql
 delete mode 100644 offchain/schema.graphql

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5b00164a9..3b0a7f3f9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Added support to `POST` *inspect state* requests
 - Added snapshot validation. The node will now check whether the snapshot's template hash matches the one stored in the blockchain
 - Added `cartesi/rollups-node` docker image with all node binaries
+- Added completion status to GraphQL API
 
 ### Changed
diff --git a/offchain/advance-runner/src/server_manager/conversions.rs b/offchain/advance-runner/src/server_manager/conversions.rs
index bb155ba76..bd05231ec 100644
--- a/offchain/advance-runner/src/server_manager/conversions.rs
+++ b/offchain/advance-runner/src/server_manager/conversions.rs
@@ -5,11 +5,12 @@
 //! rollups-events types
 use grpc_interfaces::cartesi_machine::Hash;
 use grpc_interfaces::cartesi_server_manager::{
-    Address, OutputEnum, OutputValidityProof, Proof,
+    Address, CompletionStatus, OutputEnum, OutputValidityProof, Proof,
 };
 use rollups_events::{
-    Address as RollupsAddress, Hash as RollupsHash, Payload, RollupsOutputEnum,
-    RollupsOutputValidityProof, RollupsProof, ADDRESS_SIZE, HASH_SIZE,
+    Address as RollupsAddress, Hash as RollupsHash, Payload,
+    RollupsCompletionStatus, RollupsOutputEnum, RollupsOutputValidityProof,
+    RollupsProof, ADDRESS_SIZE, HASH_SIZE,
 };
 
 use super::error::ServerManagerError;
@@ -33,6 +34,29 @@ macro_rules! get_field {
 // Export the get_field macro for other modules to use
 pub(super) use get_field;
 
+/// Convert gRPC completion status to broker equivalent
+pub fn convert_completion_status(
+    status: CompletionStatus,
+) -> RollupsCompletionStatus {
+    match status {
+        CompletionStatus::Accepted => RollupsCompletionStatus::Accepted,
+        CompletionStatus::Rejected => RollupsCompletionStatus::Rejected,
+        CompletionStatus::Exception => RollupsCompletionStatus::Exception,
+        CompletionStatus::MachineHalted => {
+            RollupsCompletionStatus::MachineHalted
+        }
+        CompletionStatus::CycleLimitExceeded => {
+            RollupsCompletionStatus::CycleLimitExceeded
+        }
+        CompletionStatus::TimeLimitExceeded => {
+            RollupsCompletionStatus::TimeLimitExceeded
+        }
+        CompletionStatus::PayloadLengthLimitExceeded => {
+            RollupsCompletionStatus::PayloadLengthLimitExceeded
+        }
+    }
+}
+
 /// Convert gRPC hash to broker equivalent
 pub fn convert_hash(hash: Hash) -> Result<RollupsHash, ServerManagerError> {
     hash.data.try_into().map(RollupsHash::new).map_err(|data| {
diff --git a/offchain/advance-runner/src/server_manager/facade.rs b/offchain/advance-runner/src/server_manager/facade.rs
index 84ef4387b..7cef548e9 100644
--- a/offchain/advance-runner/src/server_manager/facade.rs
+++ b/offchain/advance-runner/src/server_manager/facade.rs
@@ -3,8 +3,8 @@
 
 use backoff::{future::retry, Error, ExponentialBackoff};
 use rollups_events::{
-    InputMetadata as RollupsInputMetadata, Payload, RollupsClaim,
-    RollupsNotice, RollupsOutput, RollupsReport, RollupsVoucher,
+    InputMetadata as RollupsInputMetadata, Payload, RollupsAdvanceResult,
+    RollupsClaim, RollupsNotice, RollupsOutput, RollupsReport, RollupsVoucher,
 };
 use snafu::{OptionExt, ResultExt};
 use std::path::Path;
@@ -23,7 +23,8 @@ use grpc_interfaces::cartesi_server_manager::{
 use super::claim::compute_epoch_hash;
 use super::config::ServerManagerConfig;
 use super::conversions::{
-    convert_address, convert_hash, convert_proof, get_field,
+    convert_address, convert_completion_status, convert_hash, convert_proof,
+    get_field,
 };
 use super::error::{
     ConnectionSnafu, EmptyEpochSnafu, InvalidProcessedInputSnafu,
@@ -223,6 +224,13 @@ impl ServerManagerFacade {
 
         let mut outputs = vec![];
 
+        let status = convert_completion_status(processed_input.status());
+        let result = RollupsAdvanceResult {
+            input_index: current_input_index,
+            status,
+        };
+        outputs.push(RollupsOutput::AdvanceResult(result));
+
         for (index, report) in processed_input.reports.into_iter().enumerate() {
             let report = RollupsReport {
                 index: index as u64,
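With the facade change above, every processed input now emits a `RollupsOutput::AdvanceResult` event on the outputs stream before its reports, vouchers, and notices, so downstream consumers of the stream need a new match arm. A minimal sketch of a hypothetical consumer (the handler name and the `eprintln!` reaction are illustrative, not part of this patch):

```rust
use rollups_events::{RollupsCompletionStatus, RollupsOutput};

/// Hypothetical handler for events pulled from the rollups-outputs
/// broker stream; the `AdvanceResult` arm is the case this patch adds.
fn handle_output(output: RollupsOutput) {
    match output {
        RollupsOutput::AdvanceResult(result) => {
            // An input is no longer implicitly accepted: inspect the status.
            if result.status != RollupsCompletionStatus::Accepted {
                eprintln!(
                    "input {} not accepted: {:?}",
                    result.input_index, result.status
                );
            }
        }
        // The pre-existing variants keep their old handling.
        RollupsOutput::Voucher(_)
        | RollupsOutput::Notice(_)
        | RollupsOutput::Report(_)
        | RollupsOutput::Proof(_) => { /* unchanged */ }
    }
}
```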
get_field { // Export the get_field macro for other modules to use pub(super) use get_field; +/// Convert gRPC completion status to broker equivalent +pub fn convert_completion_status( + status: CompletionStatus, +) -> RollupsCompletionStatus { + match status { + CompletionStatus::Accepted => RollupsCompletionStatus::Accepted, + CompletionStatus::Rejected => RollupsCompletionStatus::Rejected, + CompletionStatus::Exception => RollupsCompletionStatus::Exception, + CompletionStatus::MachineHalted => { + RollupsCompletionStatus::MachineHalted + } + CompletionStatus::CycleLimitExceeded => { + RollupsCompletionStatus::CycleLimitExceeded + } + CompletionStatus::TimeLimitExceeded => { + RollupsCompletionStatus::TimeLimitExceeded + } + CompletionStatus::PayloadLengthLimitExceeded => { + RollupsCompletionStatus::PayloadLengthLimitExceeded + } + } +} + /// Convert gRPC hash to broker equivalent pub fn convert_hash(hash: Hash) -> Result { hash.data.try_into().map(RollupsHash::new).map_err(|data| { diff --git a/offchain/advance-runner/src/server_manager/facade.rs b/offchain/advance-runner/src/server_manager/facade.rs index 84ef4387b..7cef548e9 100644 --- a/offchain/advance-runner/src/server_manager/facade.rs +++ b/offchain/advance-runner/src/server_manager/facade.rs @@ -3,8 +3,8 @@ use backoff::{future::retry, Error, ExponentialBackoff}; use rollups_events::{ - InputMetadata as RollupsInputMetadata, Payload, RollupsClaim, - RollupsNotice, RollupsOutput, RollupsReport, RollupsVoucher, + InputMetadata as RollupsInputMetadata, Payload, RollupsAdvanceResult, + RollupsClaim, RollupsNotice, RollupsOutput, RollupsReport, RollupsVoucher, }; use snafu::{OptionExt, ResultExt}; use std::path::Path; @@ -23,7 +23,8 @@ use grpc_interfaces::cartesi_server_manager::{ use super::claim::compute_epoch_hash; use super::config::ServerManagerConfig; use super::conversions::{ - convert_address, convert_hash, convert_proof, get_field, + convert_address, convert_completion_status, convert_hash, convert_proof, + get_field, }; use super::error::{ ConnectionSnafu, EmptyEpochSnafu, InvalidProcessedInputSnafu, @@ -223,6 +224,13 @@ impl ServerManagerFacade { let mut outputs = vec![]; + let status = convert_completion_status(processed_input.status()); + let result = RollupsAdvanceResult { + input_index: current_input_index, + status, + }; + outputs.push(RollupsOutput::AdvanceResult(result)); + for (index, report) in processed_input.reports.into_iter().enumerate() { let report = RollupsReport { index: index as u64, diff --git a/offchain/data/migrations/20230921143147_completion_status/down.sql b/offchain/data/migrations/20230921143147_completion_status/down.sql new file mode 100644 index 000000000..d392633ef --- /dev/null +++ b/offchain/data/migrations/20230921143147_completion_status/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` + +ALTER TABLE "inputs" DROP "status"; + +DROP TYPE "CompletionStatus"; diff --git a/offchain/data/migrations/20230921143147_completion_status/up.sql b/offchain/data/migrations/20230921143147_completion_status/up.sql new file mode 100644 index 000000000..c4d02f0f6 --- /dev/null +++ b/offchain/data/migrations/20230921143147_completion_status/up.sql @@ -0,0 +1,14 @@ +-- Your SQL goes here + +CREATE TYPE "CompletionStatus" AS ENUM ( + 'Unprocessed', + 'Accepted', + 'Rejected', + 'Exception', + 'MachineHalted', + 'CycleLimitExceeded', + 'TimeLimitExceeded', + 'PayloadLengthLimitExceeded' +); + +ALTER TABLE "inputs" ADD "status" "CompletionStatus" NOT NULL DEFAULT 'Unprocessed'; diff 
diff --git a/offchain/data/src/lib.rs b/offchain/data/src/lib.rs
index cae2b813e..8d22f5d30 100644
--- a/offchain/data/src/lib.rs
+++ b/offchain/data/src/lib.rs
@@ -15,6 +15,6 @@ pub use migrations::{run_migrations, MigrationError};
 pub use pagination::{Connection, Cursor, Edge, PageInfo};
 pub use repository::Repository;
 pub use types::{
-    Input, InputQueryFilter, Notice, NoticeQueryFilter, OutputEnum, Proof,
-    Report, ReportQueryFilter, Voucher, VoucherQueryFilter,
+    CompletionStatus, Input, InputQueryFilter, Notice, NoticeQueryFilter,
+    OutputEnum, Proof, Report, ReportQueryFilter, Voucher, VoucherQueryFilter,
 };
diff --git a/offchain/data/src/repository.rs b/offchain/data/src/repository.rs
index 166aa63cb..f51583bbf 100644
--- a/offchain/data/src/repository.rs
+++ b/offchain/data/src/repository.rs
@@ -4,7 +4,7 @@
 use backoff::ExponentialBackoff;
 use diesel::pg::{Pg, PgConnection};
 use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
-use diesel::{insert_into, prelude::*};
+use diesel::{insert_into, prelude::*, update};
 use snafu::ResultExt;
 use std::sync::Arc;
 
@@ -13,8 +13,8 @@ use super::error::{DatabaseConnectionSnafu, DatabaseSnafu, Error};
 use super::pagination::{Connection, Pagination};
 use super::schema;
 use super::types::{
-    Input, InputQueryFilter, Notice, NoticeQueryFilter, OutputEnum, Proof,
-    Report, ReportQueryFilter, Voucher, VoucherQueryFilter,
+    CompletionStatus, Input, InputQueryFilter, Notice, NoticeQueryFilter,
+    OutputEnum, Proof, Report, ReportQueryFilter, Voucher, VoucherQueryFilter,
 };
 
 pub const POOL_CONNECTION_SIZE: u32 = 3;
@@ -223,6 +223,25 @@ impl Repository {
     }
 }
 
+/// Update operations
+impl Repository {
+    pub fn update_input_status(
+        &self,
+        input_index: i32,
+        status: CompletionStatus,
+    ) -> Result<(), Error> {
+        use schema::inputs;
+        let mut conn = self.conn()?;
+        update(inputs::table)
+            .filter(inputs::dsl::index.eq(input_index))
+            .set(inputs::status.eq(status))
+            .execute(&mut conn)
+            .context(DatabaseSnafu)?;
+        tracing::trace!("Set {:?} status to input {}", status, input_index);
+        Ok(())
+    }
+}
+
 /// Generate a boxed query from an input query filter
 impl InputQueryFilter {
     fn to_query(&self) -> schema::inputs::BoxedQuery<'_, Pg> {
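The new `update_input_status` method is the only write path for the `status` column: rows are inserted with the default and flipped later. A minimal sketch of a call site, assuming an already-configured `Repository` named `repo` (the helper itself is hypothetical; it mirrors what the indexer does further below):

```rust
use rollups_data::{CompletionStatus, Error, Repository};

/// Hypothetical helper: mark input 0 as accepted once its
/// AdvanceResult event arrives.
fn mark_accepted(repo: &Repository) -> Result<(), Error> {
    repo.update_input_status(0, CompletionStatus::Accepted)
}
```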
diff --git a/offchain/data/src/schema.rs b/offchain/data/src/schema.rs
index 0f3c7138e..69e188ca5 100644
--- a/offchain/data/src/schema.rs
+++ b/offchain/data/src/schema.rs
@@ -1,12 +1,19 @@
 // @generated automatically by Diesel CLI.
 
 pub mod sql_types {
+    #[derive(diesel::sql_types::SqlType)]
+    #[diesel(postgres_type(name = "CompletionStatus"))]
+    pub struct CompletionStatus;
+
     #[derive(diesel::sql_types::SqlType)]
     #[diesel(postgres_type(name = "OutputEnum"))]
     pub struct OutputEnum;
 }
 
 diesel::table! {
+    use diesel::sql_types::*;
+    use super::sql_types::CompletionStatus;
+
     inputs (index) {
         index -> Int4,
         msg_sender -> Bytea,
@@ -14,6 +21,7 @@ diesel::table! {
         block_number -> Int8,
         timestamp -> Timestamp,
         payload -> Bytea,
+        status -> CompletionStatus,
     }
 }
diff --git a/offchain/data/src/types.rs b/offchain/data/src/types.rs
index 82833fd3b..40d9187c0 100644
--- a/offchain/data/src/types.rs
+++ b/offchain/data/src/types.rs
@@ -8,10 +8,66 @@
 use diesel::{AsExpression, Insertable, Queryable, QueryableByName};
 use std::io::Write;
 
 use super::schema::{
-    inputs, notices, proofs, reports, sql_types::OutputEnum as SQLOutputEnum,
-    vouchers,
+    inputs, notices, proofs, reports,
+    sql_types::CompletionStatus as SQLCompletionStatus,
+    sql_types::OutputEnum as SQLOutputEnum, vouchers,
 };
 
+#[derive(Debug, PartialEq, Eq, Clone, Copy, FromSqlRow, AsExpression)]
+#[diesel(sql_type = SQLCompletionStatus)]
+pub enum CompletionStatus {
+    Unprocessed,
+    Accepted,
+    Rejected,
+    Exception,
+    MachineHalted,
+    CycleLimitExceeded,
+    TimeLimitExceeded,
+    PayloadLengthLimitExceeded,
+}
+
+impl ToSql<SQLCompletionStatus, Pg> for CompletionStatus {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        match *self {
+            CompletionStatus::Unprocessed => out.write_all(b"Unprocessed")?,
+            CompletionStatus::Accepted => out.write_all(b"Accepted")?,
+            CompletionStatus::Rejected => out.write_all(b"Rejected")?,
+            CompletionStatus::Exception => out.write_all(b"Exception")?,
+            CompletionStatus::MachineHalted => {
+                out.write_all(b"MachineHalted")?
+            }
+            CompletionStatus::CycleLimitExceeded => {
+                out.write_all(b"CycleLimitExceeded")?
+            }
+            CompletionStatus::TimeLimitExceeded => {
+                out.write_all(b"TimeLimitExceeded")?
+            }
+            CompletionStatus::PayloadLengthLimitExceeded => {
+                out.write_all(b"PayloadLengthLimitExceeded")?
+            }
+        }
+        Ok(IsNull::No)
+    }
+}
+
+impl FromSql<SQLCompletionStatus, Pg> for CompletionStatus {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        match bytes.as_bytes() {
+            b"Unprocessed" => Ok(CompletionStatus::Unprocessed),
+            b"Accepted" => Ok(CompletionStatus::Accepted),
+            b"Rejected" => Ok(CompletionStatus::Rejected),
+            b"Exception" => Ok(CompletionStatus::Exception),
+            b"MachineHalted" => Ok(CompletionStatus::MachineHalted),
+            b"CycleLimitExceeded" => Ok(CompletionStatus::CycleLimitExceeded),
+            b"TimeLimitExceeded" => Ok(CompletionStatus::TimeLimitExceeded),
+            b"PayloadLengthLimitExceeded" => {
+                Ok(CompletionStatus::PayloadLengthLimitExceeded)
+            }
+            _ => Err("Unrecognized enum variant".into()),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)]
 #[diesel(table_name = inputs)]
 pub struct Input {
@@ -21,6 +77,7 @@ pub struct Input {
     pub block_number: i64,
     pub timestamp: std::time::SystemTime,
     pub payload: Vec<u8>,
+    pub status: CompletionStatus,
 }
 
 #[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)]
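Note the three-way coupling introduced here: the Postgres enum labels in `up.sql`, the `postgres_type(name = "CompletionStatus")` attribute in `schema.rs`, and the byte strings written and matched by the `ToSql`/`FromSql` implementations in `types.rs` must all agree exactly. A label that drifts out of sync does not fail at compile time; it surfaces at runtime as the "Unrecognized enum variant" deserialization error.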
diff --git a/offchain/data/tests/repository.rs b/offchain/data/tests/repository.rs
index 7937bdc13..e7b7322fc 100644
--- a/offchain/data/tests/repository.rs
+++ b/offchain/data/tests/repository.rs
@@ -9,8 +9,8 @@ use diesel::{
 use redacted::Redacted;
 use rollups_data::Connection as PaginationConnection;
 use rollups_data::{
-    Cursor, Edge, Error, Input, InputQueryFilter, Notice, PageInfo, Proof,
-    Report, Repository, RepositoryConfig, Voucher,
+    CompletionStatus, Cursor, Edge, Error, Input, InputQueryFilter, Notice,
+    PageInfo, Proof, Report, Repository, RepositoryConfig, Voucher,
 };
 use serial_test::serial;
 use std::time::{Duration, UNIX_EPOCH};
@@ -70,6 +70,7 @@ pub fn insert_test_input(repo: &Repository) {
         block_number: 0,
         timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
         payload: "input-0".as_bytes().to_vec(),
+        status: CompletionStatus::Accepted,
     };
 
     repo.insert_input(input)
@@ -84,6 +85,7 @@ pub fn create_input() -> Input {
         block_number: 0,
         timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
         payload: "input-0".as_bytes().to_vec(),
+        status: CompletionStatus::Accepted,
     }
 }
 
@@ -176,6 +178,26 @@ fn test_get_input_error() {
     ));
 }
 
+#[test]
+#[serial]
+fn test_update_input_status() {
+    let docker = Cli::default();
+    let test = TestState::setup(&docker);
+    let repo = test.get_repository();
+
+    let mut input = create_input();
+    input.status = CompletionStatus::Unprocessed;
+
+    repo.insert_input(input.clone())
+        .expect("Failed to insert input");
+    repo.update_input_status(0, CompletionStatus::Accepted)
+        .expect("Failed to update input status");
+
+    let get_input = repo.get_input(0).expect("Failed to get input");
+
+    assert_eq!(get_input.status, CompletionStatus::Accepted);
+}
+
 #[test]
 #[serial]
 fn test_insert_notice() {
@@ -583,6 +605,7 @@ fn test_pagination_macro() {
         block_number: 0,
         timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
         payload: "input-1".as_bytes().to_vec(),
+        status: CompletionStatus::Accepted,
     };
 
     repo.insert_input(input0.clone())
diff --git a/offchain/graphql-server/src/schema/resolvers.rs b/offchain/graphql-server/src/schema/resolvers.rs
index 5c7426283..44b1d1c07 100644
--- a/offchain/graphql-server/src/schema/resolvers.rs
+++ b/offchain/graphql-server/src/schema/resolvers.rs
@@ -2,16 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
 use juniper::{
-    graphql_object, DefaultScalarValue, FieldError, FieldResult,
+    graphql_object, DefaultScalarValue, FieldError, FieldResult, GraphQLEnum,
     GraphQLInputObject, GraphQLObject,
 };
 use std::time::UNIX_EPOCH;
 
 use rollups_data::Repository;
 use rollups_data::{
-    Connection, Edge, Input, InputQueryFilter, Notice, NoticeQueryFilter,
-    OutputEnum, PageInfo as DbPageInfo, Proof, Report, ReportQueryFilter,
-    Voucher, VoucherQueryFilter,
+    CompletionStatus as DbCompletionStatus, Connection, Edge, Input,
+    InputQueryFilter, Notice, NoticeQueryFilter, OutputEnum,
+    PageInfo as DbPageInfo, Proof, Report, ReportQueryFilter, Voucher,
+    VoucherQueryFilter,
 };
 
 use super::scalar::RollupsGraphQLScalarValue;
@@ -193,6 +194,41 @@ impl Query {
     }
 }
 
+#[derive(GraphQLEnum)]
+enum CompletionStatus {
+    Unprocessed,
+    Accepted,
+    Rejected,
+    Exception,
+    MachineHalted,
+    CycleLimitExceeded,
+    TimeLimitExceeded,
+    PayloadLengthLimitExceeded,
+}
+
+impl From<DbCompletionStatus> for CompletionStatus {
+    fn from(status: DbCompletionStatus) -> CompletionStatus {
+        match status {
+            DbCompletionStatus::Unprocessed => CompletionStatus::Unprocessed,
+            DbCompletionStatus::Accepted => CompletionStatus::Accepted,
+            DbCompletionStatus::Rejected => CompletionStatus::Rejected,
+            DbCompletionStatus::Exception => CompletionStatus::Exception,
+            DbCompletionStatus::MachineHalted => {
+                CompletionStatus::MachineHalted
+            }
+            DbCompletionStatus::CycleLimitExceeded => {
+                CompletionStatus::CycleLimitExceeded
+            }
+            DbCompletionStatus::TimeLimitExceeded => {
+                CompletionStatus::TimeLimitExceeded
+            }
+            DbCompletionStatus::PayloadLengthLimitExceeded => {
+                CompletionStatus::PayloadLengthLimitExceeded
+            }
+        }
+    }
+}
+
 #[graphql_object(
     context = Context,
     Scalar = RollupsGraphQLScalarValue,
@@ -204,6 +240,11 @@ impl Input {
         self.index
     }
 
+    #[graphql(description = "Status of the input")]
+    fn status(&self) -> CompletionStatus {
+        self.status.into()
+    }
+
     #[graphql(description = "Address responsible for submitting the input")]
     fn msg_sender(&self) -> String {
         hex_encode(&self.msg_sender)
diff --git a/offchain/graphql-server/tests/integration.rs b/offchain/graphql-server/tests/integration.rs
index c913050fc..e1d6ca6be 100644
--- a/offchain/graphql-server/tests/integration.rs
+++ b/offchain/graphql-server/tests/integration.rs
@@ -5,7 +5,9 @@ use actix_web::dev::ServerHandle;
 use actix_web::rt::spawn;
 use awc::{Client, ClientRequest};
 use graphql_server::{http, schema::Context};
-use rollups_data::{Input, Notice, Proof, Report, Repository, Voucher};
+use rollups_data::{
+    CompletionStatus, Input, Notice, Proof, Report, Repository, Voucher,
+};
 use std::fs::read_to_string;
 use std::str::from_utf8;
 use std::time::{Duration, UNIX_EPOCH};
@@ -41,6 +43,7 @@ impl TestState<'_> {
             block_number: 0,
             timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
             payload: "input-0".as_bytes().to_vec(),
+            status: CompletionStatus::Accepted,
         };
 
         let notice = Notice {
@@ -131,6 +134,7 @@ impl TestState<'_> {
             block_number: 0,
             timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
             payload: "input-0".as_bytes().to_vec(),
+            status: CompletionStatus::Accepted,
         };
 
         let notice0 = Notice {
diff --git a/offchain/graphql-server/tests/queries/input.json b/offchain/graphql-server/tests/queries/input.json
index 0a03f0c5a..49ed65209 100644
--- a/offchain/graphql-server/tests/queries/input.json
+++ b/offchain/graphql-server/tests/queries/input.json
@@ -1,3 +1,3 @@
 {
-    "query": "{input(index: 0){index, msgSender, timestamp, blockNumber, payload}}"
+    "query": "{input(index: 0){index, msgSender, timestamp, blockNumber, payload, status}}"
 }
\ No newline at end of file
diff --git a/offchain/graphql-server/tests/responses/input.json b/offchain/graphql-server/tests/responses/input.json
index 911964643..82bea2976 100644
--- a/offchain/graphql-server/tests/responses/input.json
+++ b/offchain/graphql-server/tests/responses/input.json
@@ -1 +1 @@
-{"data":{"input":{"index":0,"msgSender":"0x6d73672d73656e646572","timestamp":"1676489717","blockNumber":"0","payload":"0x696e7075742d30"}}}
\ No newline at end of file
+{"data":{"input":{"index":0,"msgSender":"0x6d73672d73656e646572","timestamp":"1676489717","blockNumber":"0","payload":"0x696e7075742d30","status":"ACCEPTED"}}}
\ No newline at end of file
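The test fixtures above show the new field end to end: the query asks for `status` and the response carries it as the GraphQL enum value `ACCEPTED` (juniper converts Rust enum variants to SCREAMING_SNAKE_CASE by default). A minimal sketch of the same request from an external client, using `awc` as the integration tests do; the endpoint URL is illustrative, not part of this patch:

```rust
use awc::Client;

/// Hypothetical client: fetch an input's completion status from a
/// running node's GraphQL endpoint. The URL is an assumption; use
/// whatever host and port your node exposes.
async fn query_input_status() -> Result<(), Box<dyn std::error::Error>> {
    let query = r#"{"query": "{input(index: 0){index, status}}"}"#;
    let mut response = Client::new()
        .post("http://localhost:4000/graphql")
        .insert_header(("Content-Type", "application/json"))
        .send_body(query)
        .await?;
    let body = response.body().await?;
    // Expected shape: {"data":{"input":{"index":0,"status":"ACCEPTED"}}}
    println!("{}", std::str::from_utf8(&body)?);
    Ok(())
}
```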
diff --git a/offchain/indexer/src/conversions.rs b/offchain/indexer/src/conversions.rs
index b5a5231bb..1b579c428 100644
--- a/offchain/indexer/src/conversions.rs
+++ b/offchain/indexer/src/conversions.rs
@@ -7,11 +7,33 @@
 use std::time::{Duration, UNIX_EPOCH};
 
 use rollups_events::{
-    RollupsAdvanceStateInput, RollupsNotice, RollupsOutputEnum, RollupsProof,
-    RollupsReport, RollupsVoucher,
+    RollupsAdvanceStateInput, RollupsCompletionStatus, RollupsNotice,
+    RollupsOutputEnum, RollupsProof, RollupsReport, RollupsVoucher,
 };
 
-use rollups_data::{Input, Notice, OutputEnum, Proof, Report, Voucher};
+use rollups_data::{
+    CompletionStatus, Input, Notice, OutputEnum, Proof, Report, Voucher,
+};
+
+pub fn convert_status(status: RollupsCompletionStatus) -> CompletionStatus {
+    match status {
+        RollupsCompletionStatus::Accepted => CompletionStatus::Accepted,
+        RollupsCompletionStatus::Rejected => CompletionStatus::Rejected,
+        RollupsCompletionStatus::Exception => CompletionStatus::Exception,
+        RollupsCompletionStatus::MachineHalted => {
+            CompletionStatus::MachineHalted
+        }
+        RollupsCompletionStatus::CycleLimitExceeded => {
+            CompletionStatus::CycleLimitExceeded
+        }
+        RollupsCompletionStatus::TimeLimitExceeded => {
+            CompletionStatus::TimeLimitExceeded
+        }
+        RollupsCompletionStatus::PayloadLengthLimitExceeded => {
+            CompletionStatus::PayloadLengthLimitExceeded
+        }
+    }
+}
 
 pub fn convert_input(input: RollupsAdvanceStateInput) -> Input {
     let timestamp = UNIX_EPOCH + Duration::from_secs(input.metadata.timestamp);
@@ -22,6 +44,7 @@ pub fn convert_input(input: RollupsAdvanceStateInput) -> Input {
         block_number: input.metadata.block_number as i64,
         timestamp,
         payload: input.payload.into_inner(),
+        status: CompletionStatus::Unprocessed,
     }
 }
diff --git a/offchain/indexer/src/indexer.rs b/offchain/indexer/src/indexer.rs
index 7a8ed0a38..fb8a8b7aa 100644
--- a/offchain/indexer/src/indexer.rs
+++ b/offchain/indexer/src/indexer.rs
@@ -111,6 +111,10 @@ fn store_output(
     output: RollupsOutput,
 ) -> Result<(), rollups_data::Error> {
     match output {
+        RollupsOutput::AdvanceResult(result) => repository.update_input_status(
+            result.input_index as i32,
+            convert_status(result.status),
+        ),
         RollupsOutput::Voucher(voucher) => {
             repository.insert_voucher(convert_voucher(voucher))
         }
- "Address responsible for submitting the input" - msgSender: String! - "Timestamp associated with the input submission, as defined by the base layer's block in which it was recorded" - timestamp: BigInt! - "Number of the base layer block in which the input was recorded" - blockNumber: BigInt! - "Input payload in Ethereum hex binary format, starting with '0x'" - payload: String! - "Get voucher from this particular input given the voucher's index" - voucher(index: Int!): Voucher! - "Get notice from this particular input given the notice's index" - notice(index: Int!): Notice! - "Get report from this particular input given the report's index" - report(index: Int!): Report! - "Get vouchers from this particular input with support for pagination" - vouchers(first: Int, last: Int, after: String, before: String): VoucherConnection! - "Get notices from this particular input with support for pagination" - notices(first: Int, last: Int, after: String, before: String): NoticeConnection! - "Get reports from this particular input with support for pagination" - reports(first: Int, last: Int, after: String, before: String): ReportConnection! -} - -"Validity proof for an output" -type OutputValidityProof { - "Local input index within the context of the related epoch" - inputIndexWithinEpoch: Int! - "Output index within the context of the input that produced it" - outputIndexWithinInput: Int! - "Merkle root of all output hashes of the related input, given in Ethereum hex binary format (32 bytes), starting with '0x'" - outputHashesRootHash: String! - "Merkle root of all voucher hashes of the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'" - vouchersEpochRootHash: String! - "Merkle root of all notice hashes of the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'" - noticesEpochRootHash: String! - "Hash of the machine state claimed for the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'" - machineStateHash: String! - "Proof that this output hash is in the output-hashes merkle tree. This array of siblings is bottom-up ordered (from the leaf to the root). Each hash is given in Ethereum hex binary format (32 bytes), starting with '0x'." - outputHashInOutputHashesSiblings: [String!]! - "Proof that this output-hashes root hash is in epoch's output merkle tree. This array of siblings is bottom-up ordered (from the leaf to the root). Each hash is given in Ethereum hex binary format (32 bytes), starting with '0x'." - outputHashesInEpochSiblings: [String!]! -} - -"Representation of a transaction that can be carried out on the base layer blockchain, such as a transfer of assets" -type Voucher { - "Voucher index within the context of the input that produced it" - index: Int! - "Input whose processing produced the voucher" - input: Input! - "Transaction destination address in Ethereum hex binary format (20 bytes), starting with '0x'" - destination: String! - "Transaction payload in Ethereum hex binary format, starting with '0x'" - payload: String! - "Proof object that allows this voucher to be validated and executed on the base layer blockchain" - proof: Proof -} - -"Top level queries" -type Query { - "Get input based on its identifier" - input(index: Int!): Input! - "Get voucher based on its index" - voucher(voucherIndex: Int!, inputIndex: Int!): Voucher! - "Get notice based on its index" - notice(noticeIndex: Int!, inputIndex: Int!): Notice! 
- "Get report based on its index" - report(reportIndex: Int!, inputIndex: Int!): Report! - "Get inputs with support for pagination" - inputs(first: Int, last: Int, after: String, before: String, where: InputFilter): InputConnection! - "Get vouchers with support for pagination" - vouchers(first: Int, last: Int, after: String, before: String): VoucherConnection! - "Get notices with support for pagination" - notices(first: Int, last: Int, after: String, before: String): NoticeConnection! - "Get reports with support for pagination" - reports(first: Int, last: Int, after: String, before: String): ReportConnection! -} - -"Pagination entry" -type NoticeEdge { - "Node instance" - node: Notice! - "Pagination cursor" - cursor: String! -} - -"Pagination result" -type InputConnection { - "Total number of entries that match the query" - totalCount: Int! - "Pagination entries returned for the current page" - edges: [InputEdge!]! - "Pagination metadata" - pageInfo: PageInfo! -} - -"Pagination result" -type VoucherConnection { - "Total number of entries that match the query" - totalCount: Int! - "Pagination entries returned for the current page" - edges: [VoucherEdge!]! - "Pagination metadata" - pageInfo: PageInfo! -} - -"Informational statement that can be validated in the base layer blockchain" -type Notice { - "Notice index within the context of the input that produced it" - index: Int! - "Input whose processing produced the notice" - input: Input! - "Notice data as a payload in Ethereum hex binary format, starting with '0x'" - payload: String! - "Proof object that allows this notice to be validated by the base layer blockchain" - proof: Proof -} - -"Pagination entry" -type ReportEdge { - "Node instance" - node: Report! - "Pagination cursor" - cursor: String! -} - -"Pagination result" -type ReportConnection { - "Total number of entries that match the query" - totalCount: Int! - "Pagination entries returned for the current page" - edges: [ReportEdge!]! - "Pagination metadata" - pageInfo: PageInfo! -} - -"Filter object to restrict results depending on input properties" -input InputFilter { - "Filter only inputs with index lower than a given value" indexLowerThan: Int - "Filter only inputs with index greater than a given value" indexGreaterThan: Int -} - -scalar BigInt - -"Pagination result" -type NoticeConnection { - "Total number of entries that match the query" - totalCount: Int! - "Pagination entries returned for the current page" - edges: [NoticeEdge!]! - "Pagination metadata" - pageInfo: PageInfo! -} - -"Pagination entry" -type InputEdge { - "Node instance" - node: Input! - "Pagination cursor" - cursor: String! -} - -"Page metadata for the cursor-based Connection pagination pattern" -type PageInfo { - "Cursor pointing to the first entry of the page" - startCursor: String - "Cursor pointing to the last entry of the page" - endCursor: String - "Indicates if there are additional entries after the end curs" - hasNextPage: Boolean! - "Indicates if there are additional entries before the start curs" - hasPreviousPage: Boolean! -} - -"Application log or diagnostic information" -type Report { - "Report index within the context of the input that produced it" - index: Int! - "Input whose processing produced the report" - input: Input! - "Report data as a payload in Ethereum hex binary format, starting with '0x'" - payload: String! -} - -"Pagination entry" -type VoucherEdge { - "Node instance" - node: Voucher! - "Pagination cursor" - cursor: String! -} - -schema { - query: Query -}