Skip to content

Commit

Permalink
Use Arc to optimize Blueprint cloning
Browse files Browse the repository at this point in the history
  • Loading branch information
shashitnak committed May 6, 2024
1 parent dd2114d commit b35b4bf
Show file tree
Hide file tree
Showing 11 changed files with 42 additions and 28 deletions.
8 changes: 5 additions & 3 deletions benches/impl_path_string_for_evaluation_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -247,14 +247,16 @@ fn request_context() -> RequestContext {
extensions: Arc::new(vec![]),
};
RequestContext::new(runtime)
.server(server)
.upstream(upstream)
.server(Arc::new(server))
.upstream(Arc::new(upstream))
}

pub fn bench_main(c: &mut Criterion) {
let mut req_ctx = request_context().allowed_headers(TEST_HEADERS.clone());

req_ctx.server.vars = TEST_VARS.clone();
let mut server = req_ctx.server.as_ref().clone();
server.vars = TEST_VARS.clone();
req_ctx.server = Arc::new(server);
let eval_ctx = EvaluationContext::new(&req_ctx, &MockGraphqlContext);

assert_test(&eval_ctx);
Expand Down
4 changes: 2 additions & 2 deletions src/app_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ use crate::runtime::TargetRuntime;
pub struct AppContext {
pub schema: dynamic::Schema,
pub runtime: TargetRuntime,
pub blueprint: Blueprint,
pub blueprint: Arc<Blueprint>,
pub http_data_loaders: Arc<Vec<DataLoader<DataLoaderRequest, HttpDataLoader>>>,
pub gql_data_loaders: Arc<Vec<DataLoader<DataLoaderRequest, GraphqlDataLoader>>>,
pub grpc_data_loaders: Arc<Vec<DataLoader<grpc::DataLoaderRequest, GrpcDataLoader>>>,
Expand Down Expand Up @@ -117,7 +117,7 @@ impl AppContext {
AppContext {
schema,
runtime,
blueprint,
blueprint: Arc::new(blueprint),
http_data_loaders: Arc::new(http_data_loaders),
gql_data_loaders: Arc::new(gql_data_loaders),
grpc_data_loaders: Arc::new(grpc_data_loaders),
Expand Down
8 changes: 4 additions & 4 deletions src/blueprint/blueprint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ use crate::schema_extension::SchemaExtension;
#[derive(Clone, Debug, Default, Setters)]
pub struct Blueprint {
pub definitions: Vec<Definition>,
pub schema: SchemaDefinition,
pub server: Server,
pub upstream: Upstream,
pub telemetry: Telemetry,
pub schema: Arc<SchemaDefinition>,
pub server: Arc<Server>,
pub upstream: Arc<Upstream>,
pub telemetry: Arc<Telemetry>,
}

#[derive(Clone, Debug)]
Expand Down
19 changes: 12 additions & 7 deletions src/blueprint/from_config.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;

use async_graphql::dynamic::SchemaBuilder;

Expand All @@ -14,12 +15,13 @@ use crate::valid::{Valid, ValidationError, Validator};

pub fn config_blueprint<'a>() -> TryFold<'a, ConfigModule, Blueprint, String> {
let server = TryFoldConfig::<Blueprint>::new(|config_module, blueprint| {
Valid::from(Server::try_from(config_module.clone())).map(|server| blueprint.server(server))
Valid::from(Server::try_from(config_module.clone()))
.map(|server| blueprint.server(Arc::new(server)))
});

let schema = to_schema().transform::<Blueprint>(
|schema, blueprint| blueprint.schema(schema),
|blueprint| blueprint.schema,
|schema, blueprint| blueprint.schema(Arc::new(schema)),
|blueprint| blueprint.schema.as_ref().clone(),
);

let definitions = to_definitions().transform::<Blueprint>(
Expand All @@ -28,16 +30,17 @@ pub fn config_blueprint<'a>() -> TryFold<'a, ConfigModule, Blueprint, String> {
);

let upstream = TryFoldConfig::<Blueprint>::new(|config_module, blueprint| {
Valid::from(Upstream::try_from(config_module)).map(|upstream| blueprint.upstream(upstream))
Valid::from(Upstream::try_from(config_module))
.map(|upstream| blueprint.upstream(Arc::new(upstream)))
});

let links = TryFoldConfig::<Blueprint>::new(|config_module, blueprint| {
Valid::from(Links::try_from(config_module.links.clone())).map_to(blueprint)
});

let opentelemetry = to_opentelemetry().transform::<Blueprint>(
|opentelemetry, blueprint| blueprint.telemetry(opentelemetry),
|blueprint| blueprint.telemetry,
|opentelemetry, blueprint| blueprint.telemetry(Arc::new(opentelemetry)),
|blueprint| blueprint.telemetry.as_ref().clone(),
);

server
Expand All @@ -59,7 +62,9 @@ pub fn apply_batching(mut blueprint: Blueprint) -> Blueprint {
if let Some(Expression::IO(IO::Http { group_by: Some(_), .. })) =
field.resolver.clone()
{
blueprint.upstream.batch = blueprint.upstream.batch.or(Some(Batch::default()));
let mut upstream = blueprint.upstream.as_ref().clone();
upstream.batch = upstream.batch.or(Some(Batch::default()));
blueprint.upstream = Arc::new(upstream);
return blueprint;
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/blueprint/telemetry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ pub enum TelemetryExporter {
Apollo(Apollo),
}

#[derive(Debug, Default, Clone)]
#[derive(Clone, Debug, Default)]
pub struct Telemetry {
pub export: Option<TelemetryExporter>,
pub request_headers: Vec<String>,
Expand Down
2 changes: 1 addition & 1 deletion src/cli/runtime/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ fn init_http(blueprint: &Blueprint) -> Arc<dyn HttpIO> {
// Provides access to http in native rust environment
fn init_http2_only(blueprint: &Blueprint) -> Arc<dyn HttpIO> {
let http_io = http::NativeHttp::init(
&blueprint.upstream.clone().http2_only(true),
&blueprint.upstream.as_ref().clone().http2_only(true),
&blueprint.telemetry,
);
init_hook_http(Arc::new(http_io), blueprint.server.script.clone())
Expand Down
4 changes: 2 additions & 2 deletions src/cli/server/server_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use crate::rest::{EndpointSet, Unchecked};
use crate::schema_extension::SchemaExtension;

pub struct ServerConfig {
pub blueprint: Blueprint,
pub blueprint: Arc<Blueprint>,
pub app_ctx: Arc<AppContext>,
}

Expand Down Expand Up @@ -39,7 +39,7 @@ impl ServerConfig {
let endpoints = endpoints.into_checked(&blueprint, rt.clone()).await?;
let app_context = Arc::new(AppContext::new(blueprint.clone(), rt, endpoints));

Ok(Self { app_ctx: app_context, blueprint })
Ok(Self { app_ctx: app_context, blueprint: Arc::new(blueprint) })
}

pub fn addr(&self) -> SocketAddr {
Expand Down
3 changes: 2 additions & 1 deletion src/cli/telemetry.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use std::io::Write;
use std::sync::Arc;

use anyhow::{anyhow, Result};
use once_cell::sync::Lazy;
Expand Down Expand Up @@ -193,7 +194,7 @@ fn set_tracing_subscriber(subscriber: impl Subscriber + Send + Sync) {
let _ = tracing::subscriber::set_global_default(subscriber);
}

pub fn init_opentelemetry(config: Telemetry, runtime: &TargetRuntime) -> anyhow::Result<()> {
pub fn init_opentelemetry(config: Arc<Telemetry>, runtime: &TargetRuntime) -> anyhow::Result<()> {
if let Some(export) = &config.export {
global::set_error_handler(|error| {
if !matches!(
Expand Down
14 changes: 9 additions & 5 deletions src/http/request_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ use crate::runtime::TargetRuntime;

#[derive(Setters)]
pub struct RequestContext {
pub server: Server,
pub upstream: Upstream,
pub server: Arc<Server>,
pub upstream: Arc<Upstream>,
pub x_response_headers: Arc<Mutex<HeaderMap>>,
pub cookie_headers: Option<Arc<Mutex<HeaderMap>>>,
// A subset of all the headers received in the GraphQL Request that will be sent to the
Expand Down Expand Up @@ -207,6 +207,8 @@ impl From<&AppContext> for RequestContext {

#[cfg(test)]
mod test {
use std::sync::Arc;

use cache_control::Cachability;

use crate::blueprint::{Server, Upstream};
Expand All @@ -220,8 +222,8 @@ mod test {
let upstream = Upstream::try_from(&config_module).unwrap();
let server = Server::try_from(config_module).unwrap();
RequestContext::new(crate::runtime::test::init(None))
.upstream(upstream)
.server(server)
.upstream(Arc::new(upstream))
.server(Arc::new(server))
}
}

Expand Down Expand Up @@ -269,7 +271,9 @@ mod test {
let mut upstream = Upstream::try_from(&config_module).unwrap();
let server = Server::try_from(config_module).unwrap();
upstream.batch = Some(Batch::default());
let req_ctx: RequestContext = RequestContext::default().upstream(upstream).server(server);
let req_ctx: RequestContext = RequestContext::default()
.upstream(Arc::new(upstream))
.server(Arc::new(server));

assert!(req_ctx.is_batching_enabled());
}
Expand Down
2 changes: 1 addition & 1 deletion src/http/request_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ fn not_found() -> Result<Response<Body>> {
}

fn create_request_context(req: &Request<Body>, app_ctx: &AppContext) -> RequestContext {
let upstream = app_ctx.blueprint.upstream.clone();
let upstream = app_ctx.blueprint.upstream.as_ref().clone();
let allowed = upstream.allowed_headers;
let allowed_headers = create_allowed_headers(req.headers(), &allowed);

Expand Down
4 changes: 3 additions & 1 deletion src/path.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,9 @@ mod tests {
static REQ_CTX: Lazy<RequestContext> = Lazy::new(|| {
let mut req_ctx = RequestContext::default().allowed_headers(TEST_HEADERS.clone());

req_ctx.server.vars = TEST_VARS.clone();
let mut server = req_ctx.server.as_ref().clone();
server.vars = TEST_VARS.clone();
req_ctx.server = Arc::new(server);
req_ctx.runtime.env = Arc::new(Env::init(TEST_ENV_VARS.clone()));

req_ctx
Expand Down

1 comment on commit b35b4bf

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Running 30s test @ http://localhost:8000/graphql

4 threads and 100 connections

Thread Stats Avg Stdev Max +/- Stdev
Latency 7.48ms 3.41ms 97.29ms 72.64%
Req/Sec 3.38k 190.01 3.74k 91.33%

404201 requests in 30.01s, 2.03GB read

Requests/sec: 13468.00

Transfer/sec: 69.13MB

Please sign in to comment.