From 76fc56541460381c26de982aa8ed4a2c93d4b9c2 Mon Sep 17 00:00:00 2001 From: Kiryl Mialeshka <8974488+meskill@users.noreply.github.com> Date: Fri, 13 Dec 2024 23:53:50 +0100 Subject: [PATCH] feat: add support for separate runtime config in json & yaml formats (#3221) --- examples/jsonplaceholder.yaml | 12 + generated/.tailcallrc.graphql | 441 ------------------------------ generated/.tailcallrc.schema.json | 190 ++++++++----- src/cli/generator/source.rs | 37 ++- src/cli/tc/init.rs | 97 ++++--- src/core/config/config.rs | 116 +++++--- src/core/config/from_document.rs | 16 +- src/core/config/source.rs | 53 ++-- tailcall-typedefs/src/main.rs | 14 +- 9 files changed, 313 insertions(+), 663 deletions(-) create mode 100644 examples/jsonplaceholder.yaml diff --git a/examples/jsonplaceholder.yaml b/examples/jsonplaceholder.yaml new file mode 100644 index 0000000000..f1edf73537 --- /dev/null +++ b/examples/jsonplaceholder.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=../generated/.tailcallrc.schema.json + +server: + port: 8000 + +upstream: + batch: + delay: 100 + httpCache: 42 + +links: + - src: ./jsonplaceholder.graphql diff --git a/generated/.tailcallrc.graphql b/generated/.tailcallrc.graphql index 587bcd53c7..ddc1563d6b 100644 --- a/generated/.tailcallrc.graphql +++ b/generated/.tailcallrc.graphql @@ -247,35 +247,6 @@ directive @js( name: String! ) repeatable on FIELD_DEFINITION | OBJECT -""" -The @link directive allows you to import external resources, such as configuration -– which will be merged into the config importing it –, or a .proto file – which - will be later used by `@grpc` directive –. -""" -directive @link( - """ - Custom headers for gRPC reflection server. - """ - headers: [KeyValue] - """ - The id of the link. It is used to reference the link in the schema. - """ - id: String - """ - Additional metadata pertaining to the linked resource. - """ - meta: JSON - """ - The source of the link. It can be a URL or a path to a file. If a path is provided, - it is relative to the file that imports the link. - """ - src: String - """ - The type of the link. It can be `Config`, or `Protobuf`. - """ - type: LinkType -) repeatable on SCHEMA - directive @modify( name: String omit: Boolean @@ -303,200 +274,6 @@ directive @protected( id: [String!] ) on OBJECT | FIELD_DEFINITION -""" -The `@server` directive, when applied at the schema level, offers a comprehensive -set of server configurations. It dictates how the server behaves and helps tune tailcall -for various use-cases. -""" -directive @server( - """ - `apolloTracing` exposes GraphQL query performance data, including execution time - of queries and individual resolvers. - """ - apolloTracing: Boolean - """ - `batchRequests` combines multiple requests into one, improving performance but potentially - introducing latency and complicating debugging. Use judiciously. @default `false`. - """ - batchRequests: Boolean - """ - `enableFederation` enables functionality to Tailcall server to act as a federation - subgraph. - """ - enableFederation: Boolean - enableJIT: Boolean - """ - `globalResponseTimeout` sets the maximum query duration before termination, acting - as a safeguard against long-running queries. - """ - globalResponseTimeout: Int - """ - `headers` contains key-value pairs that are included as default headers in server - responses, allowing for consistent header management across all responses. - """ - headers: Headers - """ - `hostname` sets the server hostname. 
- """ - hostname: String - """ - `introspection` allows clients to fetch schema information directly, aiding tools - and applications in understanding available types, fields, and operations. @default - `true`. - """ - introspection: Boolean - """ - `pipelineFlush` allows to control flushing behavior of the server pipeline. - """ - pipelineFlush: Boolean - """ - `port` sets the Tailcall running port. @default `8000`. - """ - port: Int - """ - `queryValidation` checks incoming GraphQL queries against the schema, preventing - errors from invalid queries. Can be disabled for performance. @default `false`. - """ - queryValidation: Boolean - """ - `responseValidation` Tailcall automatically validates responses from upstream services - using inferred schema. @default `false`. - """ - responseValidation: Boolean - """ - `routes` allows customization of server endpoint paths. It provides options to change - the default paths for status and GraphQL endpoints. Default values are: - status: - "/status" - graphQL: "/graphql" If not specified, these default values will be used. - """ - routes: Routes - """ - A link to an external JS file that listens on every HTTP request response event. - """ - script: ScriptOptions - """ - `showcase` enables the /showcase/graphql endpoint. - """ - showcase: Boolean - """ - This configuration defines local variables for server operations. Useful for storing - constant configurations, secrets, or shared information. - """ - vars: [KeyValue] - """ - `version` sets the HTTP version for the server. Options are `HTTP1` and `HTTP2`. - @default `HTTP1`. - """ - version: HttpVersion - """ - `workers` sets the number of worker threads. @default the number of system cores. - """ - workers: Int -) on SCHEMA - -""" -The @telemetry directive facilitates seamless integration with OpenTelemetry, enhancing -the observability of your GraphQL services powered by Tailcall. By leveraging this -directive, developers gain access to valuable insights into the performance and behavior -of their applications. -""" -directive @telemetry( - export: TelemetryExporter - """ - The list of headers that will be sent as additional attributes to telemetry exporters - Be careful about **leaking sensitive information** from requests when enabling the - headers that may contain sensitive data - """ - requestHeaders: [String!] -) on SCHEMA - -""" -The `upstream` directive allows you to control various aspects of the upstream server -connection. This includes settings like connection timeouts, keep-alive intervals, -and more. If not specified, default values are used. -""" -directive @upstream( - """ - `allowedHeaders` defines the HTTP headers allowed to be forwarded to upstream services. - If not set, no headers are forwarded, enhancing security but possibly limiting data - flow. - """ - allowedHeaders: [String!] - """ - An object that specifies the batch settings, including `maxSize` (the maximum size - of the batch), `delay` (the delay in milliseconds between each batch), and `headers` - (an array of HTTP headers to be included in the batch). - """ - batch: Batch - """ - The time in seconds that the connection will wait for a response before timing out. - """ - connectTimeout: Int - """ - The `http2Only` setting allows you to specify whether the client should always issue - HTTP2 requests, without checking if the server supports it or not. By default it - is set to `false` for all HTTP requests made by the server, but is automatically - set to true for GRPC. 
- """ - http2Only: Boolean - """ - Providing httpCache size enables Tailcall's HTTP caching, adhering to the [HTTP Caching - RFC](https://tools.ietf.org/html/rfc7234), to enhance performance by minimizing redundant - data fetches. Defaults to `0` if unspecified. - """ - httpCache: Int - """ - The time in seconds between each keep-alive message sent to maintain the connection. - """ - keepAliveInterval: Int - """ - The time in seconds that the connection will wait for a keep-alive message before - closing. - """ - keepAliveTimeout: Int - """ - A boolean value that determines whether keep-alive messages should be sent while - the connection is idle. - """ - keepAliveWhileIdle: Boolean - """ - onRequest field gives the ability to specify the global request interception handler. - """ - onRequest: String - """ - The time in seconds that the connection pool will wait before closing idle connections. - """ - poolIdleTimeout: Int - """ - The maximum number of idle connections that will be maintained per host. - """ - poolMaxIdlePerHost: Int - """ - The `proxy` setting defines an intermediary server through which the upstream requests - will be routed before reaching their intended endpoint. By specifying a proxy URL, - you introduce an additional layer, enabling custom routing and security policies. - """ - proxy: Proxy - """ - The time in seconds between each TCP keep-alive message sent to maintain the connection. - """ - tcpKeepAlive: Int - """ - The maximum time in seconds that the connection will wait for a response. - """ - timeout: Int - """ - The User-Agent header value to be used in HTTP requests. @default `Tailcall/1.0` - """ - userAgent: String - """ - A boolean value that determines whether to verify certificates. Setting this as `false` - will make tailcall accept self-signed certificates. NOTE: use this *only* during - development or testing. It is highly recommended to keep this enabled (`true`) in - production. - """ - verifySSL: Boolean -) on SCHEMA - """ The `@discriminate` directive is used to drive Tailcall discriminator to use a field of an object to resolve the type. For example with the directive applied on a field @@ -652,156 +429,6 @@ input Schema { Enum: [String!] } -""" -Type to configure Cross-Origin Resource Sharing (CORS) for a server. -""" -input Cors { - """ - Indicates whether the server allows credentials (e.g., cookies, authorization headers) - to be sent in cross-origin requests. - """ - allowCredentials: Boolean - """ - A list of allowed headers in cross-origin requests. This can be used to specify custom - headers that are allowed to be included in cross-origin requests. - """ - allowHeaders: [String!] - """ - A list of allowed HTTP methods in cross-origin requests. These methods specify the - actions that are permitted in cross-origin requests. - """ - allowMethods: [Method] - """ - A list of origins that are allowed to access the server's resources in cross-origin - requests. An origin can be a domain, a subdomain, or even 'null' for local file schemes. - """ - allowOrigins: [String!] - """ - Indicates whether requests from private network addresses are allowed in cross-origin - requests. Private network addresses typically include IP addresses reserved for internal - networks. - """ - allowPrivateNetwork: Boolean - """ - A list of headers that the server exposes to the browser in cross-origin responses. - Exposing certain headers allows the client-side code to access them in the response. - """ - exposeHeaders: [String!] 
- """ - The maximum time (in seconds) that the client should cache preflight OPTIONS requests - in order to avoid sending excessive requests to the server. - """ - maxAge: Int - """ - A list of header names that indicate the values of which might cause the server's - response to vary, potentially affecting caching. - """ - vary: [String!] -} - -input Headers { - """ - `cacheControl` sends `Cache-Control` headers in responses when activated. The `max-age` - value is the least of the values received from upstream services. @default `false`. - """ - cacheControl: Boolean - """ - `cors` allows Cross-Origin Resource Sharing (CORS) for a server. - """ - cors: Cors - """ - `headers` are key-value pairs included in every server response. Useful for setting - headers like `Access-Control-Allow-Origin` for cross-origin requests or additional - headers for downstream services. - """ - custom: [KeyValue] - """ - `experimental` allows the use of `X-*` experimental headers in the response. @default - `[]`. - """ - experimental: [String!] - """ - `setCookies` when enabled stores `set-cookie` headers and all the response will be - sent with the headers. - """ - setCookies: Boolean -} - -input Routes { - graphQL: String - status: String -} - -input ScriptOptions { - timeout: Int -} - -input Apollo { - """ - Setting `apiKey` for Apollo. - """ - apiKey: String! - """ - Setting `graphRef` for Apollo in the format @. - """ - graphRef: String! - """ - Setting `platform` for Apollo. - """ - platform: String - """ - Setting `userVersion` for Apollo. - """ - userVersion: String - """ - Setting `version` for Apollo. - """ - version: String -} - -""" -Output the opentelemetry data to otlp collector -""" -input OtlpExporter { - headers: [KeyValue] - url: String! -} - -""" -Output the telemetry metrics data to prometheus server -""" -input PrometheusExporter { - format: PrometheusFormat - path: String -} - -""" -Output the opentelemetry data to the stdout. Mostly used for debug purposes -""" -input StdoutExporter { - """ - Output to stdout in pretty human-readable format - """ - pretty: Boolean -} - -input TelemetryExporter { - stdout: StdoutExporter - otlp: OtlpExporter - prometheus: PrometheusExporter - apollo: Apollo -} - -input Batch { - delay: Int - headers: [String!] - maxSize: Int -} - -input Proxy { - url: String! -} - """ The @graphQL operator allows to specify GraphQL API server request to fetch data from. @@ -1016,22 +643,6 @@ input Cache { maxAge: Int! } -""" -The @telemetry directive facilitates seamless integration with OpenTelemetry, enhancing -the observability of your GraphQL services powered by Tailcall. By leveraging this -directive, developers gain access to valuable insights into the performance and behavior -of their applications. -""" -input Telemetry { - export: TelemetryExporter - """ - The list of headers that will be sent as additional attributes to telemetry exporters - Be careful about **leaking sensitive information** from requests when enabling the - headers that may contain sensitive data - """ - requestHeaders: [String!] -} - enum Encoding { ApplicationJson ApplicationXWwwFormUrlencoded @@ -1047,56 +658,4 @@ enum Method { OPTIONS CONNECT TRACE -} - -enum LinkType { - """ - Points to another Tailcall Configuration file. The imported configuration will be merged into the importing configuration. - """ - Config - """ - Points to a Protobuf file. The imported Protobuf file will be used by the `@grpc` directive. 
If your API exposes a reflection endpoint, you should set the type to `Grpc` instead. - """ - Protobuf - """ - Points to a JS file. The imported JS file will be used by the `@js` directive. - """ - Script - """ - Points to a Cert file. The imported Cert file will be used by the server to serve over HTTPS. - """ - Cert - """ - Points to a Key file. The imported Key file will be used by the server to serve over HTTPS. - """ - Key - """ - A trusted document that contains GraphQL operations (queries, mutations) that can be exposed a REST API using the `@rest` directive. - """ - Operation - """ - Points to a Htpasswd file. The imported Htpasswd file will be used by the server to authenticate users. - """ - Htpasswd - """ - Points to a Jwks file. The imported Jwks file will be used by the server to authenticate users. - """ - Jwks - """ - Points to a reflection endpoint. The imported reflection endpoint will be used by the `@grpc` directive to resolve data from gRPC services. - """ - Grpc -} - -enum HttpVersion { - HTTP1 - HTTP2 -} - -""" -Output format for prometheus data -""" -enum PrometheusFormat { - text - protobuf } \ No newline at end of file diff --git a/generated/.tailcallrc.schema.json b/generated/.tailcallrc.schema.json index e76f874e55..62a29551d1 100644 --- a/generated/.tailcallrc.schema.json +++ b/generated/.tailcallrc.schema.json @@ -1,8 +1,18 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Config", + "title": "RuntimeConfig", "type": "object", + "required": [ + "links" + ], "properties": { + "links": { + "description": "A list of all links in the schema.", + "type": "array", + "items": { + "$ref": "#/definitions/Link" + } + }, "server": { "description": "Dictates how the server behaves and helps tune tailcall for all ingress requests. Features such as request batching, SSL, HTTP2 etc. can be configured here.", "default": {}, @@ -96,10 +106,6 @@ } } }, - "Bytes": { - "title": "Bytes", - "description": "Field whose value is a sequence of bytes." - }, "Cors": { "description": "Type to configure Cross-Origin Resource Sharing (CORS) for a server.", "type": "object", @@ -169,22 +175,6 @@ } } }, - "Date": { - "title": "Date", - "description": "Field whose value conforms to the standard date format as specified in RFC 3339 (https://datatracker.ietf.org/doc/html/rfc3339)." - }, - "DateTime": { - "title": "DateTime", - "description": "Field whose value conforms to the standard datetime format as specified in RFC 3339 (https://datatracker.ietf.org/doc/html/rfc3339\")." - }, - "Email": { - "title": "Email", - "description": "Field whose value conforms to the standard internet email address format as specified in HTML Spec: https://html.spec.whatwg.org/multipage/input.html#valid-e-mail-address." - }, - "Empty": { - "title": "Empty", - "description": "Empty scalar type represents an empty value." - }, "Headers": { "type": "object", "properties": { @@ -240,30 +230,6 @@ "HTTP2" ] }, - "Int128": { - "title": "Int128", - "description": "Field whose value is a 128-bit signed integer." - }, - "Int16": { - "title": "Int16", - "description": "Field whose value is a 16-bit signed integer." - }, - "Int32": { - "title": "Int32", - "description": "Field whose value is a 32-bit signed integer." - }, - "Int64": { - "title": "Int64", - "description": "Field whose value is a 64-bit signed integer." - }, - "Int8": { - "title": "Int8", - "description": "Field whose value is an 8-bit signed integer." 
- }, - "JSON": { - "title": "JSON", - "description": "Field whose value conforms to the standard JSON format as specified in RFC 8259 (https://datatracker.ietf.org/doc/html/rfc8259)." - }, "KeyValue": { "type": "object", "required": [ @@ -279,6 +245,112 @@ } } }, + "Link": { + "description": "The @link directive allows you to import external resources, such as configuration – which will be merged into the config importing it –, or a .proto file – which will be later used by `@grpc` directive –.", + "type": "object", + "properties": { + "headers": { + "description": "Custom headers for gRPC reflection server.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/KeyValue" + } + }, + "id": { + "description": "The id of the link. It is used to reference the link in the schema.", + "type": [ + "string", + "null" + ] + }, + "meta": { + "description": "Additional metadata pertaining to the linked resource." + }, + "src": { + "description": "The source of the link. It can be a URL or a path to a file. If a path is provided, it is relative to the file that imports the link.", + "type": "string" + }, + "type": { + "description": "The type of the link. It can be `Config`, or `Protobuf`.", + "allOf": [ + { + "$ref": "#/definitions/LinkType" + } + ] + } + }, + "additionalProperties": false + }, + "LinkType": { + "oneOf": [ + { + "description": "Points to another Tailcall Configuration file. The imported configuration will be merged into the importing configuration.", + "type": "string", + "enum": [ + "Config" + ] + }, + { + "description": "Points to a Protobuf file. The imported Protobuf file will be used by the `@grpc` directive. If your API exposes a reflection endpoint, you should set the type to `Grpc` instead.", + "type": "string", + "enum": [ + "Protobuf" + ] + }, + { + "description": "Points to a JS file. The imported JS file will be used by the `@js` directive.", + "type": "string", + "enum": [ + "Script" + ] + }, + { + "description": "Points to a Cert file. The imported Cert file will be used by the server to serve over HTTPS.", + "type": "string", + "enum": [ + "Cert" + ] + }, + { + "description": "Points to a Key file. The imported Key file will be used by the server to serve over HTTPS.", + "type": "string", + "enum": [ + "Key" + ] + }, + { + "description": "A trusted document that contains GraphQL operations (queries, mutations) that can be exposed a REST API using the `@rest` directive.", + "type": "string", + "enum": [ + "Operation" + ] + }, + { + "description": "Points to a Htpasswd file. The imported Htpasswd file will be used by the server to authenticate users.", + "type": "string", + "enum": [ + "Htpasswd" + ] + }, + { + "description": "Points to a Jwks file. The imported Jwks file will be used by the server to authenticate users.", + "type": "string", + "enum": [ + "Jwks" + ] + }, + { + "description": "Points to a reflection endpoint. The imported reflection endpoint will be used by the `@grpc` directive to resolve data from gRPC services.", + "type": "string", + "enum": [ + "Grpc" + ] + } + ] + }, "Method": { "type": "string", "enum": [ @@ -311,10 +383,6 @@ } } }, - "PhoneNumber": { - "title": "PhoneNumber", - "description": "Field whose value conforms to the standard E.164 format as specified in E.164 specification (https://en.wikipedia.org/wiki/E.164)." 
- }, "PrometheusExporter": { "description": "Output the telemetry metrics data to prometheus server", "type": "object", @@ -612,26 +680,6 @@ } ] }, - "UInt128": { - "title": "UInt128", - "description": "Field whose value is a 128-bit unsigned integer." - }, - "UInt16": { - "title": "UInt16", - "description": "Field whose value is a 16-bit unsigned integer." - }, - "UInt32": { - "title": "UInt32", - "description": "Field whose value is a 32-bit unsigned integer." - }, - "UInt64": { - "title": "UInt64", - "description": "Field whose value is a 64-bit unsigned integer." - }, - "UInt8": { - "title": "UInt8", - "description": "Field whose value is an 8-bit unsigned integer." - }, "Upstream": { "description": "The `upstream` directive allows you to control various aspects of the upstream server connection. This includes settings like connection timeouts, keep-alive intervals, and more. If not specified, default values are used.", "type": "object", @@ -778,10 +826,6 @@ } }, "additionalProperties": false - }, - "Url": { - "title": "Url", - "description": "Field whose value conforms to the standard URL format as specified in RFC 3986 (https://datatracker.ietf.org/doc/html/rfc3986)." } } } \ No newline at end of file diff --git a/src/cli/generator/source.rs b/src/cli/generator/source.rs index 1050dc4d34..caa87eb14a 100644 --- a/src/cli/generator/source.rs +++ b/src/cli/generator/source.rs @@ -1,4 +1,5 @@ -use thiserror::Error; +use crate::core::config; +use crate::core::config::SourceError; #[derive(Clone, Copy, PartialEq, Debug)] pub enum ConfigSource { @@ -6,29 +7,25 @@ pub enum ConfigSource { Yml, } -impl ConfigSource { - fn ext(&self) -> &str { - match self { - Self::Json => "json", - Self::Yml => "yml", - } - } +impl TryFrom for ConfigSource { + type Error = SourceError; - fn ends_with(&self, file: &str) -> bool { - file.ends_with(&format!(".{}", self.ext())) + fn try_from(value: config::Source) -> Result { + match value { + config::Source::Json => Ok(Self::Json), + config::Source::Yml => Ok(Self::Yml), + config::Source::GraphQL => { + Err(SourceError::UnsupportedFileFormat(value.ext().to_string())) + } + } } +} +impl ConfigSource { /// Detect the config format from the file name - pub fn detect(name: &str) -> Result { - const ALL: &[ConfigSource] = &[ConfigSource::Json, ConfigSource::Yml]; + pub fn detect(name: &str) -> Result { + let source = config::Source::detect(name)?; - ALL.iter() - .find(|format| format.ends_with(name)) - .copied() - .ok_or(UnsupportedFileFormat(name.to_string())) + ConfigSource::try_from(source) } } - -#[derive(Debug, Error, PartialEq)] -#[error("Unsupported config extension: {0}")] -pub struct UnsupportedFileFormat(String); diff --git a/src/cli/tc/init.rs b/src/cli/tc/init.rs index abbacf9e03..b7ef91a606 100644 --- a/src/cli/tc/init.rs +++ b/src/cli/tc/init.rs @@ -1,21 +1,25 @@ use std::collections::BTreeMap; use std::path::Path; -use anyhow::Result; +use anyhow::{anyhow, Result}; use super::helpers::{GRAPHQL_RC, TAILCALL_RC, TAILCALL_RC_SCHEMA}; use crate::cli::runtime::{confirm_and_write, create_directory, select_prompt}; -use crate::core::config::{Config, Expr, Field, Resolver, RootSchema, Source}; +use crate::core::config::{ + Config, Expr, Field, Link, LinkType, Resolver, RootSchema, RuntimeConfig, Source, +}; use crate::core::merge_right::MergeRight; use crate::core::runtime::TargetRuntime; use crate::core::{config, Type}; +const SCHEMA_FILENAME: &str = "main.graphql"; + pub(super) async fn init_command(runtime: TargetRuntime, folder_path: &str) -> Result<()> { 
create_directory(folder_path).await?; let selection = select_prompt( "Please select the format in which you want to generate the config.", - vec![Source::GraphQL, Source::Json, Source::Yml], + vec![Source::Json, Source::Yml], )?; let tailcallrc = include_str!("../../../generated/.tailcallrc.graphql"); @@ -25,30 +29,24 @@ pub(super) async fn init_command(runtime: TargetRuntime, folder_path: &str) -> R let tailcall_rc_schema = Path::new(folder_path).join(TAILCALL_RC_SCHEMA); let graphql_rc = Path::new(folder_path).join(GRAPHQL_RC); - match selection { - Source::GraphQL => { - // .tailcallrc.graphql - confirm_and_write( - runtime.clone(), - &tailcall_rc.display().to_string(), - tailcallrc.as_bytes(), - ) - .await?; - - // .graphqlrc.yml - confirm_and_write_yml(runtime.clone(), &graphql_rc).await?; - } - - Source::Json | Source::Yml => { - // .tailcallrc.schema.json - confirm_and_write( - runtime.clone(), - &tailcall_rc_schema.display().to_string(), - tailcallrc_json.as_bytes(), - ) - .await?; - } - } + // .tailcallrc.graphql + confirm_and_write( + runtime.clone(), + &tailcall_rc.display().to_string(), + tailcallrc.as_bytes(), + ) + .await?; + + // .graphqlrc.yml + confirm_and_write_yml(runtime.clone(), &graphql_rc).await?; + + // .tailcallrc.schema.json + confirm_and_write( + runtime.clone(), + &tailcall_rc_schema.display().to_string(), + tailcallrc_json.as_bytes(), + ) + .await?; create_main(runtime.clone(), folder_path, selection).await?; @@ -97,33 +95,60 @@ fn main_config() -> Config { }; Config { - server: Default::default(), - upstream: Default::default(), schema: RootSchema { query: Some("Query".to_string()), ..Default::default() }, types: BTreeMap::from([("Query".into(), query_type)]), ..Default::default() } } +fn runtime_config() -> RuntimeConfig { + let config = RuntimeConfig::default(); + + config.links(vec![Link { + id: Some("main".to_string()), + src: SCHEMA_FILENAME.to_string(), + type_of: LinkType::Config, + ..Default::default() + }]) +} + async fn create_main( runtime: TargetRuntime, folder_path: impl AsRef, source: Source, ) -> Result<()> { let config = main_config(); - - let content = match source { - Source::GraphQL => config.to_sdl(), - Source::Json => config.to_json(true)?, - Source::Yml => config.to_yaml()?, + let runtime_config = runtime_config(); + + let runtime_config = match source { + Source::Json => runtime_config.to_json(true)?, + Source::Yml => runtime_config.to_yaml()?, + _ => { + return Err(anyhow!( + "Only json/yaml formats are supported for json configs" + )) + } }; - let path = folder_path + let schema = config.to_sdl(); + + let runtime_config_path = folder_path .as_ref() .join(format!("main.{}", source.ext())) .display() .to_string(); + let schema_path = folder_path + .as_ref() + .join(SCHEMA_FILENAME) + .display() + .to_string(); - confirm_and_write(runtime.clone(), &path, content.as_bytes()).await?; + confirm_and_write( + runtime.clone(), + &runtime_config_path, + runtime_config.as_bytes(), + ) + .await?; + confirm_and_write(runtime.clone(), &schema_path, schema.as_bytes()).await?; Ok(()) } diff --git a/src/core/config/config.rs b/src/core/config/config.rs index a58715bec1..759ee0a9aa 100644 --- a/src/core/config/config.rs +++ b/src/core/config/config.rs @@ -38,8 +38,7 @@ use crate::core::scalar::Scalar; schemars::JsonSchema, MergeRight, )] -#[serde(rename_all = "camelCase")] -pub struct Config { +pub struct RuntimeConfig { /// /// Dictates how the server behaves and helps tune tailcall for all ingress /// requests. 
Features such as request batching, SSL, HTTP2 etc. can be @@ -53,35 +52,51 @@ pub struct Config { #[serde(default)] pub upstream: Upstream, + /// + /// A list of all links in the schema. + pub links: Vec, + + /// Enable [opentelemetry](https://opentelemetry.io) support + #[serde(default, skip_serializing_if = "is_default")] + pub telemetry: Telemetry, +} + +#[derive(Clone, Debug, Default, Setters, PartialEq, Eq, MergeRight)] +pub struct Config { + /// + /// Dictates how the server behaves and helps tune tailcall for all ingress + /// requests. Features such as request batching, SSL, HTTP2 etc. can be + /// configured here. + pub server: Server, + + /// + /// Dictates how tailcall should handle upstream requests/responses. + /// Tuning upstream can improve performance and reliability for connections. + pub upstream: Upstream, + /// /// Specifies the entry points for query and mutation in the generated /// GraphQL schema. - #[serde(skip)] pub schema: RootSchema, /// /// A map of all the types in the schema. - #[serde(skip)] #[setters(skip)] pub types: BTreeMap, /// /// A map of all the union types in the schema. - #[serde(skip)] pub unions: BTreeMap, /// /// A map of all the enum types in the schema - #[serde(skip)] pub enums: BTreeMap, /// /// A list of all links in the schema. - #[serde(skip)] pub links: Vec, /// Enable [opentelemetry](https://opentelemetry.io) support - #[serde(default, skip_serializing_if = "is_default")] pub telemetry: Telemetry, } @@ -305,7 +320,47 @@ impl Display for GraphQLOperationType { } } +impl RuntimeConfig { + pub fn from_json(json: &str) -> Result { + Ok(serde_json::from_str(json)?) + } + + pub fn from_yaml(yaml: &str) -> Result { + Ok(serde_yaml_ng::from_str(yaml)?) + } + + pub fn from_source(source: Source, config: &str) -> Result { + match source { + Source::Json => RuntimeConfig::from_json(config), + Source::Yml => RuntimeConfig::from_yaml(config), + _ => Err(anyhow!("Only the json/yaml runtime configs are supported")), + } + } + + pub fn to_yaml(&self) -> Result { + Ok(serde_yaml_ng::to_string(self)?) + } + + pub fn to_json(&self, pretty: bool) -> Result { + if pretty { + Ok(serde_json::to_string_pretty(self)?) + } else { + Ok(serde_json::to_string(self)?) + } + } +} + impl Config { + pub fn with_runtime_config(self, runtime_config: RuntimeConfig) -> Self { + Self { + server: runtime_config.server, + upstream: runtime_config.upstream, + links: runtime_config.links, + telemetry: runtime_config.telemetry, + ..self + } + } + pub fn is_root_operation_type(&self, type_name: &str) -> bool { let type_name = type_name.to_lowercase(); @@ -335,18 +390,6 @@ impl Config { self.enums.get(name) } - pub fn to_yaml(&self) -> Result { - Ok(serde_yaml_ng::to_string(self)?) - } - - pub fn to_json(&self, pretty: bool) -> Result { - if pretty { - Ok(serde_json::to_string_pretty(self)?) - } else { - Ok(serde_json::to_string(self)?) - } - } - /// Renders current config to graphQL string pub fn to_sdl(&self) -> String { crate::core::document::print(self.into()) @@ -372,14 +415,6 @@ impl Config { || self.enums.contains_key(name) } - pub fn from_json(json: &str) -> Result { - Ok(serde_json::from_str(json)?) - } - - pub fn from_yaml(yaml: &str) -> Result { - Ok(serde_yaml_ng::from_str(yaml)?) 
- } - pub fn from_sdl(sdl: &str) -> Valid { let doc = async_graphql::parser::parse_schema(sdl); match doc { @@ -388,10 +423,10 @@ impl Config { } } - pub fn from_source(source: Source, schema: &str) -> Result { + pub fn from_source(source: Source, content: &str) -> Result { match source { - Source::GraphQL => Ok(Config::from_sdl(schema).to_result()?), - _ => Err(anyhow!("Only the graphql config is currently supported")), + Source::GraphQL => Ok(Config::from_sdl(content).to_result()?), + source => Ok(Config::from(RuntimeConfig::from_source(source, content)?)), } } @@ -607,13 +642,9 @@ impl Config { .add_directive(Grpc::directive_definition(generated_types)) .add_directive(Http::directive_definition(generated_types)) .add_directive(JS::directive_definition(generated_types)) - .add_directive(Link::directive_definition(generated_types)) .add_directive(Modify::directive_definition(generated_types)) .add_directive(Omit::directive_definition(generated_types)) .add_directive(Protected::directive_definition(generated_types)) - .add_directive(Server::directive_definition(generated_types)) - .add_directive(Telemetry::directive_definition(generated_types)) - .add_directive(Upstream::directive_definition(generated_types)) .add_directive(Discriminate::directive_definition(generated_types)) .add_input(GraphQL::input_definition()) .add_input(Grpc::input_definition()) @@ -621,8 +652,7 @@ impl Config { .add_input(Expr::input_definition()) .add_input(JS::input_definition()) .add_input(Modify::input_definition()) - .add_input(Cache::input_definition()) - .add_input(Telemetry::input_definition()); + .add_input(Cache::input_definition()); for scalar in Scalar::iter() { builder = builder.add_scalar(scalar.scalar_definition()); @@ -632,6 +662,18 @@ impl Config { } } +impl From for Config { + fn from(config: RuntimeConfig) -> Self { + Self { + server: config.server, + upstream: config.upstream, + links: config.links, + telemetry: config.telemetry, + ..Default::default() + } + } +} + #[derive( Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Default, schemars::JsonSchema, )] diff --git a/src/core/config/from_document.rs b/src/core/config/from_document.rs index b05757900f..20096936a4 100644 --- a/src/core/config/from_document.rs +++ b/src/core/config/from_document.rs @@ -12,7 +12,7 @@ use indexmap::IndexMap; use tailcall_valid::{Valid, ValidationError, Validator}; use super::directive::{to_directive, Directive}; -use super::{Alias, Discriminate, Resolver, Telemetry, FEDERATION_DIRECTIVES}; +use super::{Alias, Discriminate, Resolver, RuntimeConfig, Telemetry, FEDERATION_DIRECTIVES}; use crate::core::config::{ self, Cache, Config, Enum, Link, Modify, Omit, Protected, RootSchema, Server, Union, Upstream, Variant, @@ -51,15 +51,11 @@ pub fn from_document(doc: ServiceDocument) -> Valid { .fuse(links(sd)) .fuse(telemetry(sd)) .map( - |(server, upstream, types, unions, enums, schema, links, telemetry)| Config { - server, - upstream, - types, - unions, - enums, - schema, - links, - telemetry, + |(server, upstream, types, unions, enums, schema, links, telemetry)| { + let runtime_config = RuntimeConfig { server, upstream, links, telemetry }; + let config = Config { types, unions, enums, schema, ..Default::default() }; + + config.with_runtime_config(runtime_config) }, ) }) diff --git a/src/core/config/source.rs b/src/core/config/source.rs index 24e6adc110..010673fdb0 100644 --- a/src/core/config/source.rs +++ b/src/core/config/source.rs @@ -1,10 +1,10 @@ +use std::path::Path; +use std::str::FromStr; + use 
schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use tailcall_valid::{ValidationError, Validator}; use thiserror::Error; -use super::Config; - #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub enum Source { @@ -27,21 +27,24 @@ impl std::fmt::Display for Source { const JSON_EXT: &str = "json"; const YML_EXT: &str = "yml"; const GRAPHQL_EXT: &str = "graphql"; -const ALL: [Source; 3] = [Source::Json, Source::Yml, Source::GraphQL]; #[derive(Debug, Error, PartialEq)] -#[error("Unsupported config extension: {0}")] -pub struct UnsupportedConfigFormat(pub String); +pub enum SourceError { + #[error("Unsupported config extension: {0}")] + UnsupportedFileFormat(String), + #[error("Cannot parse")] + InvalidPath(String), +} impl std::str::FromStr for Source { - type Err = UnsupportedConfigFormat; + type Err = SourceError; fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "json" => Ok(Source::Json), "yml" | "yaml" => Ok(Source::Yml), "graphql" | "gql" => Ok(Source::GraphQL), - _ => Err(UnsupportedConfigFormat(s.to_string())), + _ => Err(SourceError::UnsupportedFileFormat(s.to_string())), } } } @@ -56,34 +59,12 @@ impl Source { } } - fn ends_with(&self, file: &str) -> bool { - file.ends_with(&format!(".{}", self.ext())) - } - /// Detect the config format from the file name - pub fn detect(name: &str) -> Result { - ALL.into_iter() - .find(|format| format.ends_with(name)) - .ok_or(UnsupportedConfigFormat(name.to_string())) - } - - /// Encode the config to the given format - pub fn encode(&self, config: &Config) -> Result { - match self { - Source::Yml => Ok(config.to_yaml()?), - Source::GraphQL => Ok(config.to_sdl()), - Source::Json => Ok(config.to_json(true)?), - } - } - - /// Decode the config from the given data - pub fn decode(&self, data: &str) -> Result> { - match self { - Source::Yml => Config::from_yaml(data).map_err(|e| ValidationError::new(e.to_string())), - Source::GraphQL => Config::from_sdl(data).to_result(), - Source::Json => { - Config::from_json(data).map_err(|e| ValidationError::new(e.to_string())) - } - } + pub fn detect(name: &str) -> Result { + Path::new(name) + .extension() + .and_then(|ext| ext.to_str()) + .map(Source::from_str) + .ok_or(SourceError::InvalidPath(name.to_string()))? } } diff --git a/tailcall-typedefs/src/main.rs b/tailcall-typedefs/src/main.rs index aecf725b24..f147486ff8 100644 --- a/tailcall-typedefs/src/main.rs +++ b/tailcall-typedefs/src/main.rs @@ -7,14 +7,12 @@ use std::process::exit; use std::sync::Arc; use anyhow::{anyhow, Result}; -use schemars::schema::{RootSchema, Schema}; -use schemars::Map; +use schemars::schema::RootSchema; use serde_json::{json, Value}; -use strum::IntoEnumIterator; use tailcall::cli; -use tailcall::core::config::Config; +use tailcall::core::config::RuntimeConfig; use tailcall::core::tracing::default_tracing_for_name; -use tailcall::core::{scalar, FileIO}; +use tailcall::core::FileIO; static JSON_SCHEMA_FILE: &str = "generated/.tailcallrc.schema.json"; static GRAPHQL_SCHEMA_FILE: &str = "generated/.tailcallrc.graphql"; @@ -143,11 +141,7 @@ fn get_graphql_path() -> PathBuf { } fn get_updated_json() -> Result { - let mut schema: RootSchema = schemars::schema_for!(Config); - let scalar = scalar::Scalar::iter() - .map(|scalar| (scalar.name(), scalar.schema())) - .collect::>(); - schema.definitions.extend(scalar); + let schema: RootSchema = schemars::schema_for!(RuntimeConfig); let schema = json!(schema); Ok(schema)
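
---

The patch splits the former all-in-one `Config` into a GraphQL-only `Config` (types, unions, enums, root schema) plus a serde-backed `RuntimeConfig` (server, upstream, links, telemetry) that can live in a separate JSON or YAML file and be merged back in via `Config::with_runtime_config`. Below is a minimal sketch of how the two halves are loaded and recombined, assuming the crate paths `tailcall::core::config::{Config, RuntimeConfig, Source}` and `tailcall_valid::Validator` shown in the diff; the function name and error handling are illustrative, not part of the patch.

```rust
use anyhow::Result;
// Assumed import: the diff brings `Validator` into scope for `.to_result()` on `Valid`.
use tailcall_valid::Validator;
use tailcall::core::config::{Config, RuntimeConfig, Source};

/// Sketch: combine a GraphQL schema file with a separate YAML runtime config,
/// mirroring what `Config::from_source` and `Config::with_runtime_config` do in the patch.
fn load_split_config(schema_sdl: &str, runtime_yaml: &str) -> Result<Config> {
    // The GraphQL source still goes through the SDL parser.
    let schema_only = Config::from_sdl(schema_sdl).to_result()?;

    // Runtime settings (server, upstream, links, telemetry) come from YAML or JSON.
    let runtime = RuntimeConfig::from_source(Source::Yml, runtime_yaml)?;

    // Merge the runtime half back into the schema half.
    Ok(schema_only.with_runtime_config(runtime))
}
```

With the patch applied, `Config::from_source(Source::Yml, content)` performs the same conversion internally through `Config::from(RuntimeConfig::from_source(..)?)`, while the GraphQL schema itself is referenced from the runtime config as a `Config`-typed link, as in the new `examples/jsonplaceholder.yaml`.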