diff --git a/README.md b/README.md
index d4e11be..5838a2a 100644
--- a/README.md
+++ b/README.md
@@ -5,9 +5,9 @@
 ## Installing
 Latest Zig Stable: `0.13.0`
 
-Latest zzz release: `0.1.0`
+Latest zzz release: `0.2.0`
 ```
-zig fetch --save git+https://github.com/mookums/zzz#v0.1.0
+zig fetch --save git+https://github.com/mookums/zzz#v0.2.0
 ```
 
 You can then add the dependency in your `build.zig` file:
@@ -21,14 +21,14 @@ exe.root_module.addImport(zzz);
 ```
 
 ## zzz?
-zzz is a framework for writing performant and reliable networked services in Zig. It currently only supports TCP as the underlying transport layer and allows for any arbitrary protocol to run on top. It also natively supports TLS for securing connections.
+zzz is a framework for writing performant and reliable networked services in Zig. It supports both HTTP and HTTPS (using BearSSL for TLS).
 
-zzz currently supports Linux, Mac and Windows. Linux is currently the only target supported for deployments.
+zzz currently supports Linux, Mac and Windows. Linux is currently the recommended target for deployments.
 
 > [!IMPORTANT]
 > zzz is currently **alpha** software and there is still a lot changing at a fairly quick pace and certain places where things are less polished.
 
-It focuses on modularity and portability, allowing you to swap in your own implementations for various things. Consumers can provide both a protocol and an async implementation, allowing for maximum flexibility. This allows for use in standard servers as well as embedded/bare metal domains.
+It focuses on modularity and portability, allowing you to swap in your own implementations for various things. Consumers can provide an async implementation, allowing for maximum flexibility. This allows for use in standard servers as well as embedded/bare metal domains.
 
 For more information, look here:
 1. [Getting Started](./docs/getting_started.md)
@@ -51,20 +51,18 @@ With the recent migration to [tardy](https://github.com/mookums/tardy), zzz is a
 
 On the CCX63 instance on Hetzner with 2000 max connections, we are 70.9% faster than [zap](https://github.com/zigzap/zap) and 83.8% faster than [http.zig](https://github.com/karlseguin/http.zig). We also utilize less memory, using only ~3% of the memory used by zap and ~1.6% of the memory used by http.zig.
 
-zzz can be configured to utilize minimal memory while remaining performant. The provided `minram` example only uses 256 kB (using `io_uring` and musl)!
+zzz can be configured to utilize minimal memory while remaining performant. The provided `minram` example only uses 256 kB!
 
 ## Features
 - Built on top of [Tardy](https://github.com/mookums/tardy), an asynchronous runtime.
 - [Modular Asynchronous Implementation](https://muki.gg/post/modular-async)
     - `io_uring` for Linux (>= 5.1.0).
     - `epoll` for Linux (>= 2.5.45).
+    - `kqueue` for BSD & Mac.
     - `busy_loop` for Linux, Mac and Windows.
-- Modular Protocol Implementation
-    - Allows for defining your own Protocol on top of TCP.
-    - Comes with:
-        - [HTTP/1.1](https://github.com/mookums/zzz/blob/main/src/http)
-        - HTTP/2 (planned)
-        - MQTT (planned)
-- Single and Multi-threaded Support
+- Single and Multithreaded Support
 - TLS using BearSSL
-- (Almost) all memory allocated at startup
+- Memory Pooling for minimal allocations
+
+## Contribution
+Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in zzz by you, shall be licensed as MPL2.0, without any additional terms or conditions.
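For a consumer project, the install steps above amount to a small amount of `build.zig` wiring. The sketch below is illustrative only: it assumes the dependency names `zzz` and `tardy` recorded by `zig fetch --save`, that each package exposes a module under the same name, and a hypothetical `src/main.zig` entry point.

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "my_service", // hypothetical executable name
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    // Dependencies are resolved by the names saved into build.zig.zon by `zig fetch --save`.
    const zzz = b.dependency("zzz", .{ .target = target, .optimize = optimize });
    const tardy = b.dependency("tardy", .{ .target = target, .optimize = optimize });

    // The 0.2.0 examples import both modules, since handlers now interact
    // with the tardy Runtime directly.
    exe.root_module.addImport("zzz", zzz.module("zzz"));
    exe.root_module.addImport("tardy", tardy.module("tardy"));

    b.installArtifact(exe);
}
```

This mirrors the change to `add_example` in the `build.zig` diff below, which now threads a `tardy_module` through to every example alongside the `zzz` module.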
diff --git a/build.zig b/build.zig index 7d51ba0..9e38017 100644 --- a/build.zig +++ b/build.zig @@ -28,14 +28,14 @@ pub fn build(b: *std.Build) void { zzz.linkLibrary(bearssl); - add_example(b, "basic", .http, false, target, optimize, zzz); - add_example(b, "custom", .http, false, target, optimize, zzz); - add_example(b, "tls", .http, true, target, optimize, zzz); - add_example(b, "minram", .http, false, target, optimize, zzz); - add_example(b, "fs", .http, false, target, optimize, zzz); - add_example(b, "multithread", .http, false, target, optimize, zzz); - add_example(b, "benchmark", .http, false, target, optimize, zzz); - add_example(b, "valgrind", .http, true, target, optimize, zzz); + add_example(b, "basic", false, target, optimize, zzz, tardy); + add_example(b, "sse", false, target, optimize, zzz, tardy); + add_example(b, "tls", true, target, optimize, zzz, tardy); + add_example(b, "minram", false, target, optimize, zzz, tardy); + add_example(b, "fs", false, target, optimize, zzz, tardy); + add_example(b, "multithread", false, target, optimize, zzz, tardy); + add_example(b, "benchmark", false, target, optimize, zzz, tardy); + add_example(b, "valgrind", true, target, optimize, zzz, tardy); const tests = b.addTest(.{ .name = "tests", @@ -50,22 +50,18 @@ pub fn build(b: *std.Build) void { test_step.dependOn(&run_test.step); } -const Protocol = enum { - http, -}; - fn add_example( b: *std.Build, name: []const u8, - protocol: Protocol, link_libc: bool, target: std.Build.ResolvedTarget, optimize: std.builtin.Mode, zzz_module: *std.Build.Module, + tardy_module: *std.Build.Module, ) void { const example = b.addExecutable(.{ - .name = b.fmt("{s}_{s}", .{ @tagName(protocol), name }), - .root_source_file = b.path(b.fmt("./examples/{s}/{s}/main.zig", .{ @tagName(protocol), name })), + .name = name, + .root_source_file = b.path(b.fmt("./examples/{s}/main.zig", .{name})), .target = target, .optimize = optimize, .strip = false, @@ -76,17 +72,18 @@ fn add_example( } example.root_module.addImport("zzz", zzz_module); + example.root_module.addImport("tardy", tardy_module); + const install_artifact = b.addInstallArtifact(example, .{}); + b.getInstallStep().dependOn(&install_artifact.step); - const run_cmd = b.addRunArtifact(example); - run_cmd.step.dependOn(&install_artifact.step); - if (b.args) |args| { - run_cmd.addArgs(args); - } + const build_step = b.step(b.fmt("{s}", .{name}), b.fmt("Build zzz example ({s})", .{name})); + build_step.dependOn(&install_artifact.step); + + const run_artifact = b.addRunArtifact(example); + run_artifact.step.dependOn(&install_artifact.step); - const run_step = b.step( - b.fmt("run_{s}_{s}", .{ @tagName(protocol), name }), - b.fmt("Run {s} {s}", .{ @tagName(protocol), name }), - ); - run_step.dependOn(&run_cmd.step); + const run_step = b.step(b.fmt("run_{s}", .{name}), b.fmt("Run zzz example ({s})", .{name})); + run_step.dependOn(&install_artifact.step); + run_step.dependOn(&run_artifact.step); } diff --git a/build.zig.zon b/build.zig.zon index d907180..e2315da 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,11 +1,11 @@ .{ .name = "zzz", - .version = "0.0.0", + .version = "0.2.0", .minimum_zig_version = "0.13.0", .dependencies = .{ .tardy = .{ - .url = "git+https://github.com/mookums/tardy#12a2bcae25b34c4eb34ab5e3b5db101823a61cd6", - .hash = "122073200a2412251ad1e7eb322d9d04868a1444f98bdb4d47bb630491806c8d36d4", + .url = "git+https://github.com/mookums/tardy?ref=v0.1.0#ae0970d6b3fa5b03625b14e142c664efe1fd7789", + .hash = 
"12207f5afee3b8933c1c32737e8feedc80a2e4feebe058739509094c812e4a8d2cc8", }, .bearssl = .{ .url = "https://github.com/mookums/bearssl-zig/archive/37a96eee56fe2543579bbc6da148ca886f3dd32b.tar.gz", @@ -14,6 +14,8 @@ }, .paths = .{ + "README.md", + "LICENSE", "build.zig", "build.zig.zon", "src", diff --git a/docs/getting_started.md b/docs/getting_started.md index 5e46ee4..318cc1f 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -1,18 +1,32 @@ # Getting Started zzz is a networking framework that allows for modularity and flexibility in design. For most use cases, this flexibility is not a requirement and so various defaults are provided. -For this guide, we will assume that you are running on a modern Linux platform and looking to design a service that utilizes HTTP. +For this guide, we will assume that you are running on a modern Linux platform and looking to design a service that utilizes HTTP. We will need both `zzz` and `tardy` for this to work. +You will need to match the version of Tardy that zzz is currently using to the version of Tardy you currently use within your program. This will eventually be standardized. -`zig fetch --save git+https://github.com/mookums/zzz#main` +These are the current latest releases and are compatible. + +`zig fetch --save git+https://github.com/mookums/zzz#v0.2.0` + +`zig fetch --save git+https://github.com/mookums/tardy#v0.1.0` ## Hello, World! We can write a quick example that serves out "Hello, World" responses to any client that connects to the server. This example is the same as the one that is provided within the `examples/basic` directory. ```zig const std = @import("std"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/basic"); + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; pub fn main() !void { const host: []const u8 = "0.0.0.0"; @@ -22,21 +36,33 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + // Creating our Tardy instance that will spawn our runtimes. + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + }); + defer t.deinit(); + + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = + const num: i8 = 12; + + try router.serve_route("/", Route.init().get(&num, struct { + pub fn handler_fn(ctx: *Context, id: *const i8) !void { + const body_fmt = \\ \\ \\ \\

Hello, World!
+ \\ id: {d}

\\ \\ ; - response.set(.{ + const body = try std.fmt.allocPrint(ctx.allocator, body_fmt, .{id.*}); + + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -44,14 +70,24 @@ pub fn main() !void { } }.handler_fn)); - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); + // This provides the entry function into every Tardy runtime. + // This runs once within each runtime that spawns. + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); } ``` diff --git a/docs/https.md b/docs/https.md index f2950b3..ece4156 100644 --- a/docs/https.md +++ b/docs/https.md @@ -7,23 +7,40 @@ zzz utilizes [BearSSL](https://bearssl.org/) to provide a safe and performant TL This is derived from the example at `examples/tls` and utilizes some certificates that are present within the repository. ```zig const std = @import("std"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/tls"); + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.{ .tls = .{ + .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, + .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, + .cert_name = "CERTIFICATE", + .key_name = "EC PRIVATE KEY", +} }); + +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -33,7 +50,7 @@ pub fn main() !void { \\ ; - response.set(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -41,39 +58,28 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ - .status = .Kill, - .mime = http.Mime.HTML, - .body = "", - }); - } - }.handler_fn)); - - var server = http.Server( - .{ - .tls = .{ - .cert = .{ - .file = .{ .path = "./examples/http/tls/certs/cert.pem" }, - }, - .key = .{ - .file = .{ .path = "./examples/http/tls/certs/key.pem" }, - }, - .cert_name = "CERTIFICATE", - .key_name = "EC PRIVATE KEY", - }, - }, - .auto, - ).init(.{ + var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, }); - defer server.deinit(); + defer t.deinit(); - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const 
Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); } ``` This example above passes the `.tls` variant of the enum to the HTTP Server and provides the location of the certificate and key to be used. It also has the functionality to pass in a buffer containing the cert and key data if that is preferable. You must also provide the certificate and key name as the PEM format allows for multiple items to be placed within the same file. diff --git a/docs/performance.md b/docs/performance.md index 90b96a4..57b5a0a 100644 --- a/docs/performance.md +++ b/docs/performance.md @@ -4,16 +4,12 @@ zzz's design philosophy results in a lot of knobs that the consumer of the libra These performance tips are general and can apply to any protocol implementation. HTTP is used as the general example because it is currently the only completed protocol. ## Performance Hunting -zzz now officially runs multithreaded by default. By default, it will utilize `@min(cpu_count / 2 - 1, 1)` threads. This can be tuned by using the `.threading` flag. +zzz now officially runs multithreaded by default. By default, it will utilize `@min(cpu_count / 2 - 1, 1)` threads. This can be tuned by changing the `.threading` option of the Tardy runtime. ```zig -var server = http.Server(.plain, .auto).init(.{ +var t = try Tardy.init(.{ .allocator = allocator, - .threading = .{ .multi = COUNT }, - .size_backlog = 32, - .size_connections_max = 16, - .size_connection_arena_retain = 64, - .size_socket_buffer = 512, + .threading = .{ .multi = COUNT}, }); ``` @@ -30,7 +26,7 @@ Other settings of note include: When using zzz in certain environments, your goal may be to reduce memory usage. zzz provides a variety of controls for handling how much memory is allocated at start up. ```zig -var server = http.Server(.plain, .auto).init(.{ +var server = Server.init(.{ .allocator = allocator, .size_backlog = 32, .size_connections_max = 16, diff --git a/examples/basic/main.zig b/examples/basic/main.zig new file mode 100644 index 0000000..2089e66 --- /dev/null +++ b/examples/basic/main.zig @@ -0,0 +1,80 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/basic"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + // Creating our Tardy instance that + // will spawn our runtimes. + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + const num: i8 = 12; + + try router.serve_route("/", Route.init().get(&num, struct { + pub fn handler_fn(ctx: *Context, id: *const i8) !void { + const body_fmt = + \\ + \\ + \\ + \\

Hello, World!
+ \\ id: {d}

+ \\ + \\ + ; + + const body = try std.fmt.allocPrint(ctx.allocator, body_fmt, .{id.*}); + + // This is the standard response and what you + // will usually be using. This will send to the + // client and then continue to await more requests. + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body[0..], + }); + } + }.handler_fn)); + + // This provides the entry function into the Tardy runtime. This will run + // exactly once inside of each runtime (each thread gets a single runtime). + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/examples/http/benchmark/index.html b/examples/benchmark/index.html similarity index 100% rename from examples/http/benchmark/index.html rename to examples/benchmark/index.html diff --git a/examples/http/benchmark/main.zig b/examples/benchmark/main.zig similarity index 55% rename from examples/http/benchmark/main.zig rename to examples/benchmark/main.zig index b17e53d..573bc4a 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/benchmark/main.zig @@ -1,16 +1,26 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/benchmark"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/benchmark"); + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; pub const std_options = .{ .log_level = .err, }; -fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) void { - const name = context.captures[0].string; +fn hi_handler(ctx: *Context, _: void) !void { + const name = ctx.captures[0].string; - const body = std.fmt.allocPrint(context.allocator, + const body = try std.fmt.allocPrint(ctx.allocator, \\ \\ \\ @@ -27,16 +37,9 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) \\ \\ \\ - , .{name}) catch { - response.set(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Out of Memory!", - }); - return; - }; + , .{name}); - response.set(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body, @@ -46,21 +49,36 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; + var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){}; const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); - defer router.deinit(); - try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/hi/%s", http.Route.init().get(hi_handler)); - - var server = http.Server(.plain, .auto).init(.{ + var t = try Tardy.init(.{ .allocator = allocator, .threading = .auto, }); - defer server.deinit(); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); + try router.serve_route("/hi/%s", Route.init().get({}, hi_handler)); - try server.bind(host, port); - try server.listen(.{ .router = &router }); + try t.entry( + &router, + struct { + fn entry(rt: 
*Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); } diff --git a/examples/fs/main.zig b/examples/fs/main.zig new file mode 100644 index 0000000..36d067c --- /dev/null +++ b/examples/fs/main.zig @@ -0,0 +1,78 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/fs"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .auto, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + const body = + \\ + \\ + \\ + \\

Hello, World!

+ \\ + \\ + ; + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body[0..], + }); + } + }.handler_fn)); + + try router.serve_route("/kill", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + ctx.runtime.stop(); + } + }.handler_fn)); + + try router.serve_fs_dir("/static", "./examples/http/fs/static"); + + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + Server.clean(rt) catch unreachable; + } + }.exit, + ); +} diff --git a/examples/http/fs/static/index.html b/examples/fs/static/index.html similarity index 100% rename from examples/http/fs/static/index.html rename to examples/fs/static/index.html diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig deleted file mode 100644 index bc9f337..0000000 --- a/examples/http/basic/main.zig +++ /dev/null @@ -1,44 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/basic"); - -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); - defer _ = gpa.deinit(); - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = - \\ - \\ - \\ - \\

Hello, World!

- \\ - \\ - ; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body[0..], - }); - } - }.handler_fn)); - - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig deleted file mode 100644 index 5380ff4..0000000 --- a/examples/http/fs/main.zig +++ /dev/null @@ -1,39 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/fs"); - -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - const allocator = std.heap.page_allocator; - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = - \\ - \\ - \\ - \\

Hello, World!

- \\ - \\ - ; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body[0..], - }); - } - }.handler_fn)); - - try router.serve_fs_dir("/static", "./examples/http/fs/static"); - - var server = http.Server(.plain, .auto).init(.{ .allocator = allocator }); - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig deleted file mode 100644 index c81a0fb..0000000 --- a/examples/http/minram/main.zig +++ /dev/null @@ -1,54 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/minram"); - -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - var gpa = std.heap.GeneralPurposeAllocator(.{ .enable_memory_limit = true }){ .requested_memory_limit = 1024 * 300 }; - const allocator = gpa.allocator(); - defer _ = gpa.deinit(); - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = - \\ - \\ - \\ - \\

Hello, World!

- \\ - \\ - ; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body[0..], - }); - } - }.handler_fn)); - - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .single, - .size_backlog = 32, - .size_connections_max = 16, - .size_connection_arena_retain = 64, - .size_completions_reap_max = 8, - .size_socket_buffer = 512, - }); - - try server.bind(host, port); - try server.listen(.{ - .router = &router, - .num_header_max = 32, - .num_captures_max = 0, - .size_request_max = 2048, - .size_request_uri_max = 256, - }); -} diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig deleted file mode 100644 index d434b67..0000000 --- a/examples/http/multithread/main.zig +++ /dev/null @@ -1,94 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/multithread"); - -fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) void { - const name = context.captures[0].string; - const greeting = context.queries.get("greeting") orelse "Hi"; - - const body = std.fmt.allocPrint(context.allocator, - \\ - \\ - \\ - \\ - \\

{s}, {s}! - \\ click to go home! - \\ Enter a name to say hi!

- \\ - \\ - \\ - \\ - , .{ greeting, name }) catch { - response.set(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Out of Memory!", - }); - return; - }; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body, - }); -} - -fn redir_handler(_: http.Request, response: *http.Response, context: http.Context) void { - _ = context; - response.set(.{ - .status = .@"Permanent Redirect", - .mime = http.Mime.HTML, - .body = "", - }); - - response.headers.add("Location", "/hi/redirect") catch { - response.set(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Redirect Handler Failed", - }); - return; - }; -} - -fn post_handler(request: http.Request, response: *http.Response, _: http.Context) void { - log.debug("Body: {s}", .{request.body}); - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = "", - }); -} - -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - // if multithreaded, you need a thread-safe allocator. - const allocator = std.heap.page_allocator; - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/hi/%s", http.Route.init().get(hi_handler)); - try router.serve_route("/redirect", http.Route.init().get(redir_handler)); - try router.serve_route("/post", http.Route.init().post(post_handler)); - - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .auto, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig deleted file mode 100644 index c923d07..0000000 --- a/examples/http/tls/main.zig +++ /dev/null @@ -1,70 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/tls"); -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - const allocator = std.heap.c_allocator; - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); - - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = - \\ - \\ - \\ - \\ - \\ - \\ - \\

Hello, World!

- \\ - \\ - ; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body[0..], - }); - } - }.handler_fn)); - - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ - .status = .Kill, - .mime = http.Mime.HTML, - .body = "", - }); - } - }.handler_fn)); - - var server = http.Server( - .{ - .tls = .{ - .cert = .{ - .file = .{ .path = "./examples/http/tls/certs/cert.pem" }, - }, - .key = .{ - .file = .{ .path = "./examples/http/tls/certs/key.pem" }, - }, - .cert_name = "CERTIFICATE", - .key_name = "EC PRIVATE KEY", - }, - }, - .auto, - ).init(.{ - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig deleted file mode 100644 index 83d3dc2..0000000 --- a/examples/http/valgrind/main.zig +++ /dev/null @@ -1,52 +0,0 @@ -const std = @import("std"); -const zzz = @import("zzz"); -const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/valgrind"); - -pub fn main() !void { - const host: []const u8 = "0.0.0.0"; - const port: u16 = 9862; - - const allocator = std.heap.c_allocator; - - var router = http.Router.init(allocator); - defer router.deinit(); - - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = - \\ - \\ - \\ - \\

Hello, World!

- \\ - \\ - ; - - response.set(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = body[0..], - }); - } - }.handler_fn)); - - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ - .status = .Kill, - .mime = http.Mime.HTML, - .body = "", - }); - } - }.handler_fn)); - - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} diff --git a/examples/minram/main.zig b/examples/minram/main.zig new file mode 100644 index 0000000..2d43a6f --- /dev/null +++ b/examples/minram/main.zig @@ -0,0 +1,86 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/minram"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + var gpa = std.heap.GeneralPurposeAllocator( + .{ .enable_memory_limit = true }, + ){ .requested_memory_limit = 1024 * 300 }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + const max_conn = 16; + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + const body = + \\ + \\ + \\ + \\

Hello, World!

+ \\ + \\ + ; + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body[0..], + }); + } + }.handler_fn)); + + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ + .allocator = rt.allocator, + .size_backlog = 32, + .size_connections_max = max_conn, + .size_connection_arena_retain = 64, + .size_completions_reap_max = 8, + .size_socket_buffer = 512, + .num_header_max = 32, + .num_captures_max = 0, + .size_request_max = 2048, + .size_request_uri_max = 256, + }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/examples/http/multithread/index.html b/examples/multithread/index.html similarity index 100% rename from examples/http/multithread/index.html rename to examples/multithread/index.html diff --git a/examples/multithread/main.zig b/examples/multithread/main.zig new file mode 100644 index 0000000..fa87c1f --- /dev/null +++ b/examples/multithread/main.zig @@ -0,0 +1,107 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/multithread"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + +fn hi_handler(ctx: *Context, _: void) !void { + const name = ctx.captures[0].string; + const greeting = ctx.queries.get("greeting") orelse "Hi"; + + const body = try std.fmt.allocPrint(ctx.allocator, + \\ + \\ + \\ + \\ + \\

{s}, {s}! + \\ click to go home! + \\ Enter a name to say hi!

+ \\ + \\ + \\ + \\ + , .{ greeting, name }); + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body, + }); +} + +fn redir_handler(ctx: *Context, _: void) !void { + try ctx.response.headers.add("Location", "/hi/redirect"); + + try ctx.respond(.{ + .status = .@"Permanent Redirect", + .mime = http.Mime.HTML, + .body = "", + }); +} + +fn post_handler(ctx: *Context, _: void) !void { + log.debug("Body: {s}", .{ctx.request.body}); + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = "", + }); +} + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + // if multithreaded, you need a thread-safe allocator. + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){}; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .auto, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); + try router.serve_route("/hi/%s", Route.init().get({}, hi_handler)); + try router.serve_route("/redirect", Route.init().get({}, redir_handler)); + try router.serve_route("/post", Route.init().post({}, post_handler)); + + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/examples/sse/index.html b/examples/sse/index.html new file mode 100644 index 0000000..218bccc --- /dev/null +++ b/examples/sse/index.html @@ -0,0 +1,85 @@ + + + + + + SSE Example + + +

Server-Sent Events Example

+ +
+ +
+
+ + + + + + diff --git a/examples/sse/main.zig b/examples/sse/main.zig new file mode 100644 index 0000000..8a9c767 --- /dev/null +++ b/examples/sse/main.zig @@ -0,0 +1,143 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/sse"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; +const Task = tardy.Task; +const Broadcast = tardy.Broadcast; +const Channel = tardy.Channel; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; +const SSE = Server.SSE; + +// When using SSE, you end up leaving the various abstractions that zzz has setup for you +// and you begin programming more against the tardy runtime. + +const SSEBroadcastContext = struct { + sse: *SSE, + channel: *Channel(usize), +}; + +fn sse_send(_: *Runtime, value_opt: ?*const usize, ctx: *SSEBroadcastContext) !void { + if (value_opt) |value| { + const data = try std.fmt.allocPrint( + ctx.sse.allocator, + "value: {d}", + .{value.*}, + ); + + try ctx.sse.send(.{ .data = data }, ctx, sse_recv); + } else { + const broadcast = ctx.sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + broadcast.unsubscribe(ctx.channel); + try ctx.sse.context.close(); + } +} + +fn sse_recv(_: *Runtime, success: bool, ctx: *SSEBroadcastContext) !void { + if (success) { + try ctx.channel.recv(ctx, sse_send); + } else { + log.debug("channel closed", .{}); + const broadcast = ctx.sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + broadcast.unsubscribe(ctx.channel); + } +} + +fn sse_init(rt: *Runtime, success: bool, sse: *SSE) !void { + if (!success) { + // on failure, it'll auto close after + // the sse initalization task runs. 
+ log.err("sse initalization failed", .{}); + return; + } + + const broadcast = sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + const context = try sse.allocator.create(SSEBroadcastContext); + context.* = .{ .sse = sse, .channel = try broadcast.subscribe(rt, 10) }; + try context.channel.recv(context, sse_send); +} + +fn sse_handler(ctx: *Context, _: void) !void { + log.debug("going into sse mode", .{}); + try ctx.to_sse(sse_init); +} + +fn msg_handler(ctx: *Context, broadcast: *Broadcast(usize)) !void { + log.debug("message handler", .{}); + try broadcast.send(0); + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = "", + }); +} + +fn kill_handler(ctx: *Context, _: void) !void { + ctx.runtime.stop(); +} + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + const max_conn = 512; + + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .{ .multi = 2 }, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + var broadcast = try Broadcast(usize).init(allocator, max_conn); + defer broadcast.deinit(); + + try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); + try router.serve_route("/kill", Route.init().get({}, kill_handler)); + try router.serve_route("/stream", Route.init().get({}, sse_handler)); + try router.serve_route("/message", Route.init().post(&broadcast, msg_handler)); + + const EntryParams = struct { + router: *const Router, + broadcast: *Broadcast(usize), + }; + + try t.entry( + EntryParams{ .router = &router, .broadcast = &broadcast }, + struct { + fn entry(rt: *Runtime, params: EntryParams) !void { + try rt.storage.store_ptr("broadcast", params.broadcast); + + var server = Server.init(.{ + .allocator = rt.allocator, + .size_connections_max = max_conn, + }); + + try server.bind(host, port); + try server.serve(params.router, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/examples/http/tls/certs/cert.pem b/examples/tls/certs/cert.pem similarity index 100% rename from examples/http/tls/certs/cert.pem rename to examples/tls/certs/cert.pem diff --git a/examples/http/tls/certs/key.pem b/examples/tls/certs/key.pem similarity index 100% rename from examples/http/tls/certs/key.pem rename to examples/tls/certs/key.pem diff --git a/examples/http/tls/embed/pico.min.css b/examples/tls/embed/pico.min.css similarity index 100% rename from examples/http/tls/embed/pico.min.css rename to examples/tls/embed/pico.min.css diff --git a/examples/tls/main.zig b/examples/tls/main.zig new file mode 100644 index 0000000..9c4e786 --- /dev/null +++ b/examples/tls/main.zig @@ -0,0 +1,87 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/tls"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.{ .tls = .{ + .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, + .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, + .cert_name = "CERTIFICATE", + .key_name = "EC PRIVATE KEY", +} }); + +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + 
+pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); + + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + const body = + \\ + \\ + \\ + \\ + \\ + \\ + \\

Hello, World!

+ \\ + \\ + ; + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body[0..], + }); + } + }.handler_fn)); + + try router.serve_route("/kill", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + ctx.runtime.stop(); + } + }.handler_fn)); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + }); + defer t.deinit(); + + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/examples/valgrind/main.zig b/examples/valgrind/main.zig new file mode 100644 index 0000000..4d6bbf5 --- /dev/null +++ b/examples/valgrind/main.zig @@ -0,0 +1,73 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/valgrind"); +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + + var gpa = std.heap.GeneralPurposeAllocator(.{}){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + const body = + \\ + \\ + \\ + \\

Hello, World!

+ \\ + \\ + ; + + try ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = body[0..], + }); + } + }.handler_fn)); + + try router.serve_route("/kill", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + ctx.runtime.stop(); + } + }.handler_fn)); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + }); + defer t.deinit(); + + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); +} diff --git a/flake.lock b/flake.lock index 34380c1..2451aa0 100644 --- a/flake.lock +++ b/flake.lock @@ -139,11 +139,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1727469213, - "narHash": "sha256-d/65qZtXwVpIwxBTdNuf484Ie58wa5CYK5h+aXsLDoQ=", + "lastModified": 1731172921, + "narHash": "sha256-LTwbJCRZua1cZCiaisO1pnX446EKSZcYy9730jX1F4w=", "owner": "nixos", "repo": "nixpkgs", - "rev": "a9a9e085f155b55e4a6dc49a16572b2c799ba66f", + "rev": "0b88841b5eb61720be435a9e40f69bd853fa232c", "type": "github" }, "original": { @@ -260,11 +260,11 @@ "nixpkgs": "nixpkgs_2" }, "locked": { - "lastModified": 1727439069, - "narHash": "sha256-qonN+4DQtubTpV5VluIX57aq6J0UurtW2IGz8dtIzJE=", + "lastModified": 1731154207, + "narHash": "sha256-hWLW7Bt/6614gxX0tbnSW9aJMahOAIQW+LC5HZix+XA=", "owner": "mitchellh", "repo": "zig-overlay", - "rev": "93b14ef3d7f496ae49c79ba392eb3351cb92b57a", + "rev": "9f756b991d0fe79f322432dcd6e97e4ffccceb61", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 23795c2..69863fc 100644 --- a/flake.nix +++ b/flake.nix @@ -1,5 +1,5 @@ { - description = "a high performance communicaton abstraction library"; + description = "a framework for writing performant and reliable networked services"; inputs = { nixpkgs.url = "github:nixos/nixpkgs/release-24.05"; @@ -33,7 +33,6 @@ gdb valgrind # Benchmarking - linuxPackages.perf wrk ]; }; diff --git a/src/core/job.zig b/src/core/job.zig index 01ae302..378b7cb 100644 --- a/src/core/job.zig +++ b/src/core/job.zig @@ -1,7 +1,17 @@ const std = @import("std"); const Pseudoslice = @import("lib.zig").Pseudoslice; -pub const SendType = struct { +const TaskFn = @import("tardy").TaskFn; + +pub const AfterType = union(enum) { + recv, + sse: struct { + func: *const anyopaque, + ctx: *anyopaque, + }, +}; +pub const SendInner = struct { + after: AfterType, slice: Pseudoslice, count: usize, security: union(enum) { @@ -24,6 +34,6 @@ pub const Job = union(enum) { accept, handshake: struct { state: enum { recv, send }, count: usize }, recv: struct { count: usize }, - send: SendType, + send: SendInner, close, }; diff --git a/src/core/lib.zig b/src/core/lib.zig index e091ee8..f21f77a 100644 --- a/src/core/lib.zig +++ b/src/core/lib.zig @@ -1,3 +1,2 @@ pub const Job = @import("job.zig").Job; pub const Pseudoslice = @import("pseudoslice.zig").Pseudoslice; -pub const Server = @import("server.zig").Server; diff --git a/src/core/pseudoslice.zig b/src/core/pseudoslice.zig index e3a9660..c4d519b 100644 --- a/src/core/pseudoslice.zig +++ b/src/core/pseudoslice.zig @@ -21,7 +21,7 @@ pub const Pseudoslice = struct { /// Operates like a slice. That means it does not capture the end. /// Start is an inclusive bound and end is an exclusive bound. 
- pub fn get(self: *Pseudoslice, start: usize, end: usize) []const u8 { + pub fn get(self: *const Pseudoslice, start: usize, end: usize) []const u8 { assert(end >= start); assert(self.shared.len >= end - start); const clamped_end = @min(end, self.len); diff --git a/src/core/server.zig b/src/core/server.zig deleted file mode 100644 index f8abe54..0000000 --- a/src/core/server.zig +++ /dev/null @@ -1,846 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const assert = std.debug.assert; -const log = std.log.scoped(.@"zzz/server"); - -const Pseudoslice = @import("pseudoslice.zig").Pseudoslice; -const ZProvision = @import("zprovision.zig").ZProvision; - -const TLSFileOptions = @import("../tls/lib.zig").TLSFileOptions; -const TLSContext = @import("../tls/lib.zig").TLSContext; -const TLS = @import("../tls/lib.zig").TLS; - -const Pool = @import("tardy").Pool; -pub const Threading = @import("tardy").TardyThreading; -pub const Runtime = @import("tardy").Runtime; -pub const Task = @import("tardy").Task; -pub const AsyncIOType = @import("tardy").AsyncIOType; -const TardyCreator = @import("tardy").Tardy; -const Cross = @import("tardy").Cross; - -pub const RecvStatus = union(enum) { - kill, - recv, - send: Pseudoslice, -}; - -/// Security Model to use. -/// -/// Default: .plain (plaintext) -pub const Security = union(enum) { - plain, - tls: struct { - cert: TLSFileOptions, - key: TLSFileOptions, - cert_name: []const u8 = "CERTIFICATE", - key_name: []const u8 = "PRIVATE KEY", - }, -}; - -/// These are various general configuration -/// options that are important for the actual framework. -/// -/// This includes various different options and limits -/// for interacting with the underlying network. -pub const zzzConfig = struct { - /// The allocator that server will use. - allocator: std.mem.Allocator, - /// Threading Model to use. - /// - /// Default: .auto - threading: Threading = .auto, - /// Kernel Backlog Value. - size_backlog: u31 = 512, - /// Number of Maximum Concurrent Connections. - /// - /// This is applied PER thread if using multi-threading. - /// zzz will drop/close any connections greater - /// than this. - /// - /// You want to tune this to your expected number - /// of maximum connections. - /// - /// Default: 1024 - size_connections_max: u16 = 1024, - /// Maximum number of completions we can reap - /// with a single call of reap(). - /// - /// Default: 256 - size_completions_reap_max: u16 = 256, - /// Amount of allocated memory retained - /// after an arena is cleared. - /// - /// A higher value will increase memory usage but - /// should make allocators faster.Tardy - /// - /// A lower value will reduce memory usage but - /// will make allocators slower. - /// - /// Default: 1KB - size_connection_arena_retain: u32 = 1024, - /// Size of the buffer (in bytes) used for - /// interacting with the socket. - /// - /// Default: 4 KB. - size_socket_buffer: u32 = 1024 * 4, - /// Maximum size (in bytes) of the Recv buffer. - /// This is mainly a concern when you are reading in - /// large requests before responding. - /// - /// Default: 2MB. 
- size_recv_buffer_max: u32 = 1024 * 1024 * 2, -}; - -fn RecvFn(comptime ProtocolData: type, comptime ProtocolConfig: type) type { - return *const fn ( - rt: *Runtime, - provision: *ZProvision(ProtocolData), - p_config: *const ProtocolConfig, - z_config: *const zzzConfig, - recv_buffer: []const u8, - ) RecvStatus; -} - -pub fn Server( - comptime security: Security, - comptime async_type: AsyncIOType, - comptime ProtocolData: type, - comptime ProtocolConfig: type, - comptime recv_fn: RecvFn(ProtocolData, ProtocolConfig), -) type { - const TLSContextType = comptime if (security == .tls) TLSContext else void; - const TLSType = comptime if (security == .tls) ?TLS else void; - const Provision = ZProvision(ProtocolData); - const Tardy = TardyCreator(async_type); - - return struct { - const Self = @This(); - allocator: std.mem.Allocator, - tardy: Tardy, - config: zzzConfig, - addr: std.net.Address, - tls_ctx: TLSContextType, - - pub fn init(config: zzzConfig) Self { - const tls_ctx = switch (comptime security) { - .tls => |inner| TLSContext.init(.{ - .allocator = config.allocator, - .cert = inner.cert, - .cert_name = inner.cert_name, - .key = inner.key, - .key_name = inner.key_name, - .size_tls_buffer_max = config.size_socket_buffer * 2, - }) catch unreachable, - .plain => void{}, - }; - - return Self{ - .allocator = config.allocator, - .tardy = Tardy.init(.{ - .allocator = config.allocator, - .threading = config.threading, - .size_tasks_max = config.size_connections_max, - .size_aio_jobs_max = config.size_connections_max, - .size_aio_reap_max = config.size_completions_reap_max, - }) catch unreachable, - .config = config, - .addr = undefined, - .tls_ctx = tls_ctx, - }; - } - - pub fn deinit(self: *Self) void { - if (comptime security == .tls) { - self.tls_ctx.deinit(); - } - - self.tardy.deinit(); - } - - fn create_socket(self: *const Self) !std.posix.socket_t { - const socket: std.posix.socket_t = blk: { - const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; - break :blk try std.posix.socket( - self.addr.any.family, - socket_flags, - std.posix.IPPROTO.TCP, - ); - }; - - log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); - - if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT_LB, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEADDR, - &std.mem.toBytes(@as(c_int, 1)), - ); - } - - try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); - return socket; - } - - /// If you are using a custom implementation that does NOT rely - /// on TCP/IP, you can SKIP calling this method and just set the - /// socket value yourself. - /// - /// This is only allowed on certain targets that do not have TCP/IP - /// support. 
- pub fn bind(self: *Self, host: []const u8, port: u16) !void { - assert(host.len > 0); - assert(port > 0); - - self.addr = blk: { - switch (comptime builtin.os.tag) { - .windows => break :blk try std.net.Address.parseIp(host, port), - else => break :blk try std.net.Address.resolveIp(host, port), - } - }; - } - - fn close_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .close); - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - log.info("{d} - closing connection", .{provision.index}); - - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - tls_ptr.*.?.deinit(); - tls_ptr.* = null; - } - - provision.socket = Cross.socket.INVALID_SOCKET; - provision.job = .empty; - _ = provision.arena.reset(.{ .retain_with_limit = z_config.size_connection_arena_retain }); - provision.data.clean(); - provision.recv_buffer.clearRetainingCapacity(); - pool.release(provision.index); - - const accept_queued = rt.storage.get_ptr("accept_queued", bool); - if (!accept_queued.*) { - accept_queued.* = true; - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); - } - } - - fn accept_task(rt: *Runtime, t: *const Task, _: ?*anyopaque) !void { - const child_socket = t.result.?.socket; - - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const accept_queued = rt.storage.get_ptr("accept_queued", bool); - accept_queued.* = false; - - if (rt.scheduler.tasks.clean() >= 2) { - accept_queued.* = true; - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); - } - - if (!Cross.socket.is_valid(child_socket)) { - log.err("socket accept failed", .{}); - return error.AcceptFailed; - } - - // This should never fail. It means that we have a dangling item. - assert(pool.clean() > 0); - const borrowed = pool.borrow_hint(t.index) catch unreachable; - - log.info("{d} - accepting connection", .{borrowed.index}); - log.debug( - "empty provision slots: {d}", - .{pool.items.len - pool.dirty.count()}, - ); - assert(borrowed.item.job == .empty); - - try Cross.socket.disable_nagle(child_socket); - try Cross.socket.to_nonblock(child_socket); - - const provision = borrowed.item; - - // Store the index of this item. 
- provision.index = @intCast(borrowed.index); - provision.socket = child_socket; - - switch (comptime security) { - .tls => |_| { - const tls_ctx = rt.storage.get_const_ptr("tls_ctx", TLSContextType); - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* == null); - - tls_ptr.* = tls_ctx.create(child_socket) catch |e| { - log.err("{d} - tls creation failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSCreationFailed; - }; - - const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { - log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSStartHandshakeFailed; - }; - - provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = recv_buf, - .func = handshake_task, - .ctx = borrowed.item, - }); - }, - .plain => { - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = borrowed.item, - }); - }, - } - } - - fn recv_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .recv); - const length: i32 = t.result.?.value; - - const p_config = rt.storage.get_const_ptr("p_config", ProtocolConfig); - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - const recv_job = &provision.job.recv; - - // If the socket is closed. 
- if (length <= 0) { - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return; - } - - log.debug("{d} - recv triggered", .{provision.index}); - - const recv_count: usize = @intCast(length); - recv_job.count += recv_count; - const pre_recv_buffer = provision.buffer[0..recv_count]; - - const recv_buffer = blk: { - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { - log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSDecryptFailed; - }; - }, - .plain => break :blk pre_recv_buffer, - } - }; - - var status: RecvStatus = recv_fn(rt, provision, p_config, z_config, recv_buffer); - - switch (status) { - .kill => { - rt.stop(); - return error.Killed; - }, - .recv => { - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - .send => |*pslice| { - const plain_buffer = pslice.get(0, z_config.size_socket_buffer); - - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSEncryptFailed; - }; - - provision.job = .{ - .send = .{ - .slice = pslice.*, - .count = @intCast(plain_buffer.len), - .security = .{ - .tls = .{ - .encrypted = encrypted_buffer, - .encrypted_count = 0, - }, - }, - }, - }; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = encrypted_buffer, - .func = send_task, - .ctx = provision, - }); - }, - .plain => { - provision.job = .{ - .send = .{ - .slice = pslice.*, - .count = 0, - .security = .plain, - }, - }; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); - }, - } - }, - } - } - - fn handshake_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - log.debug("Handshake Task", .{}); - assert(security == .tls); - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - const length: i32 = t.result.?.value; - - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - assert(provision.job == .handshake); - const handshake_job = &provision.job.handshake; - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - log.debug("processing handshake", .{}); - handshake_job.count += 1; - - if (length <= 0) { - log.debug("handshake connection closed", .{}); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeClosed; - } - - if (handshake_job.count >= 50) { - log.debug("handshake taken too many cycles", .{}); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeTooManyCycles; - } - - const hs_length: usize = 
@intCast(length); - - switch (handshake_job.state) { - .recv => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .recv = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on recv failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeRecvFailed; - }; - - switch (hstate) { - .recv => |buf| { - log.debug("requeing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("queueing send in handshake", .{}); - handshake_job.state = .send; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } - }, - .send => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .send = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on send failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeSendFailed; - }; - - switch (hstate) { - .recv => |buf| { - handshake_job.state = .recv; - log.debug("queuing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("requeing send in handshake", .{}); - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } - }, - } - } else unreachable; - } - - fn send_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .send); - const length: i32 = t.result.?.value; - - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - // If the socket is closed. - if (length <= 0) { - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return; - } - - const send_job = &provision.job.send; - - log.debug("{d} - send triggered", .{provision.index}); - const send_count: usize = @intCast(length); - log.debug("{d} - send length: {d}", .{ provision.index, send_count }); - - switch (comptime security) { - .tls => { - assert(send_job.security == .tls); - - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const job_tls = &send_job.security.tls; - job_tls.encrypted_count += send_count; - - if (job_tls.encrypted_count >= job_tls.encrypted.len) { - if (send_job.count >= send_job.slice.len) { - // All done sending. 
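// Both the encrypted buffer and the full response pseudoslice have been flushed, so the
// connection arena is reset (retaining a configured limit), the recv buffer is cleared,
// and a fresh recv is queued so the connection stays alive for the next request.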
- log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = z_config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; - - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - } else { - // Queue a new chunk up for sending. - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); - - const inner_slice = send_job.slice.get( - send_job.count, - send_job.count + z_config.size_socket_buffer, - ); - - send_job.count += @intCast(inner_slice.len); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSEncryptFailed; - }; - - job_tls.encrypted = encrypted; - job_tls.encrypted_count = 0; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = job_tls.encrypted, - .func = send_task, - .ctx = provision, - }); - } - } else { - log.debug( - "{d} - sending next encrypted chunk starting at index {d}", - .{ provision.index, job_tls.encrypted_count }, - ); - - const remainder = job_tls.encrypted[job_tls.encrypted_count..]; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = remainder, - .func = send_task, - .ctx = provision, - }); - } - }, - .plain => { - assert(send_job.security == .plain); - send_job.count += send_count; - - if (send_job.count >= send_job.slice.len) { - log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = z_config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; - - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - } else { - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); - - const plain_buffer = send_job.slice.get( - send_job.count, - send_job.count + z_config.size_socket_buffer, - ); - - log.debug("{d} - chunk ends at: {d}", .{ - provision.index, - plain_buffer.len + send_job.count, - }); - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); - } - }, - } - } - - pub fn listen(self: *Self, protocol_config: ProtocolConfig) !void { - log.info("server listening...", .{}); - log.info("security mode: {s}", .{@tagName(security)}); - - const EntryParams = struct { - zzz: *Self, - p_config: *ProtocolConfig, - }; - - try self.tardy.entry( - struct { - fn rt_start(rt: *Runtime, alloc: std.mem.Allocator, params: EntryParams) !void { - const socket = try params.zzz.create_socket(); - try std.posix.listen(socket, params.zzz.config.size_backlog); - - // use the arena here. 
- var pool_params = params.zzz.config; - pool_params.allocator = alloc; - - const provision_pool = try alloc.create(Pool(Provision)); - provision_pool.* = try Pool(Provision).init( - alloc, - params.zzz.config.size_connections_max, - Provision.init_hook, - pool_params, - ); - - for (provision_pool.items) |*provision| { - provision.data = ProtocolData.init(alloc, params.p_config); - } - - try rt.storage.store_ptr("provision_pool", provision_pool); - try rt.storage.store_ptr("z_config", ¶ms.zzz.config); - try rt.storage.store_ptr("p_config", params.p_config); - - if (comptime security == .tls) { - const tls_slice = try alloc.alloc( - TLSType, - params.zzz.config.size_connections_max, - ); - if (comptime security == .tls) { - for (tls_slice) |*tls| { - tls.* = null; - } - } - - // since slices are fat pointers... - try rt.storage.store_alloc("tls_slice", tls_slice); - try rt.storage.store_ptr("tls_ctx", ¶ms.zzz.tls_ctx); - } - - try rt.storage.store_alloc("server_socket", socket); - try rt.storage.store_alloc("accept_queued", true); - - try rt.net.accept(.{ - .socket = socket, - .func = accept_task, - }); - } - }.rt_start, - EntryParams{ - .zzz = self, - .p_config = @constCast(&protocol_config), - }, - struct { - fn rt_end(rt: *Runtime, alloc: std.mem.Allocator, _: anytype) void { - // clean up socket. - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - std.posix.close(server_socket); - - // clean up provision pool. - const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - for (provision_pool.items) |*provision| { - provision.data.deinit(alloc); - } - provision_pool.deinit(Provision.deinit_hook, alloc); - alloc.destroy(provision_pool); - - // clean up TLS. - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - alloc.free(tls_slice); - } - } - }.rt_end, - void, - ); - } - }; -} diff --git a/src/core/zprovision.zig b/src/core/zprovision.zig deleted file mode 100644 index 3e2f63e..0000000 --- a/src/core/zprovision.zig +++ /dev/null @@ -1,41 +0,0 @@ -const std = @import("std"); -const panic = std.debug.panic; -const Job = @import("../core/lib.zig").Job; -const TLS = @import("../tls/lib.zig").TLS; - -pub fn ZProvision(comptime ProtocolData: type) type { - return struct { - const Self = @This(); - index: usize, - job: Job, - socket: std.posix.socket_t, - buffer: []u8, - recv_buffer: std.ArrayList(u8), - arena: std.heap.ArenaAllocator, - data: ProtocolData, - - pub fn init_hook(provisions: []Self, ctx: anytype) void { - for (provisions) |*provision| { - provision.job = .empty; - provision.socket = undefined; - provision.data = undefined; - // Create Buffer - provision.buffer = ctx.allocator.alloc(u8, ctx.size_socket_buffer) catch { - panic("attempting to statically allocate more memory than available. 
(Socket Buffer)", .{}); - }; - // Create Recv Buffer - provision.recv_buffer = std.ArrayList(u8).init(ctx.allocator); - // Create the Context Arena - provision.arena = std.heap.ArenaAllocator.init(ctx.allocator); - } - } - - pub fn deinit_hook(provisions: []Self, allocator: anytype) void { - for (provisions) |*provision| { - allocator.free(provision.buffer); - provision.recv_buffer.deinit(); - provision.arena.deinit(); - } - } - }; -} diff --git a/src/http/context.zig b/src/http/context.zig index 52bae26..b4a9fa1 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -1,21 +1,122 @@ const std = @import("std"); +const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/context"); +const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice; + const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; +const Provision = @import("provision.zig").Provision; + +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const ResponseSetOptions = Response.ResponseSetOptions; +const Mime = @import("mime.zig").Mime; +const _SSE = @import("sse.zig").SSE; + +const Runtime = @import("tardy").Runtime; +const Task = @import("tardy").Task; +const TaskFn = @import("tardy").TaskFn; + +const raw_respond = @import("server.zig").raw_respond; + +// Context is dependent on the server that gets created. +pub fn Context(comptime Server: type) type { + return struct { + const Self = @This(); + const SSE = _SSE(Server); + allocator: std.mem.Allocator, + runtime: *Runtime, + /// The Request that triggered this handler. + request: *const Request, + /// The Response that will be returned. + response: *Response, + path: []const u8, + captures: []Capture, + queries: *QueryMap, + provision: *Provision, + triggered: bool = false, + + pub fn to_sse(self: *Self, then: TaskFn(bool, *SSE)) !void { + assert(!self.triggered); + self.triggered = true; + + self.response.set(.{ + .status = .OK, + .body = "", + .mime = Mime{ + .extension = ".sse", + .description = "Server-Sent Events", + .content_type = "text/event-stream", + }, + }); + + const headers = try self.provision.response.headers_into_buffer( + self.provision.buffer, + null, + ); + + const sse = try self.allocator.create(SSE); + sse.* = .{ + .context = self, + .runtime = self.runtime, + .allocator = self.allocator, + }; + + const pslice = Pseudoslice.init(headers, "", self.provision.buffer); + + const first_chunk = try Server.prepare_send( + self.runtime, + self.provision, + .{ .sse = .{ + .func = then, + .ctx = sse, + } }, + pslice, + ); + + try self.runtime.net.send( + self.provision, + Server.send_then_sse_task, + self.provision.socket, + first_chunk, + ); + } + + pub fn close(self: *Self) !void { + self.provision.job = .close; + try self.runtime.net.close( + self.provision, + Server.close_task, + self.provision.socket, + ); + } + + pub fn respond(self: *Self, options: ResponseSetOptions) !void { + assert(!self.triggered); + self.triggered = true; + self.response.set(options); + + const body = options.body orelse ""; + const headers = try self.provision.response.headers_into_buffer( + self.provision.buffer, + @intCast(body.len), + ); + const pslice = Pseudoslice.init(headers, body, self.provision.buffer); + + const first_chunk = try Server.prepare_send( + self.runtime, + self.provision, + .recv, + pslice, + ); -pub const Context = struct { - allocator: std.mem.Allocator, - path: []const u8, - captures: []Capture, - queries: *QueryMap, - - pub fn 
init(allocator: std.mem.Allocator, path: []const u8, captures: []Capture, queries: *QueryMap) Context { - return Context{ - .allocator = allocator, - .path = path, - .captures = captures, - .queries = queries, - }; - } -}; + try self.runtime.net.send( + self.provision, + Server.send_then_recv_task, + self.provision.socket, + first_chunk, + ); + } + }; +} diff --git a/src/http/headers.zig b/src/http/headers.zig index 5c778d9..21723e6 100644 --- a/src/http/headers.zig +++ b/src/http/headers.zig @@ -6,16 +6,16 @@ const CaseStringMap = @import("case_string_map.zig").CaseStringMap; pub const Headers = struct { allocator: std.mem.Allocator, map: CaseStringMap([]const u8), - num_header_max: u32, + count: u32, - pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Headers { + pub fn init(allocator: std.mem.Allocator, count: u32) !Headers { var map = CaseStringMap([]const u8).init(allocator); - try map.ensureTotalCapacity(num_header_max); + try map.ensureTotalCapacity(@intCast(count)); return Headers{ .allocator = allocator, .map = map, - .num_header_max = num_header_max, + .count = count, }; } @@ -25,11 +25,11 @@ pub const Headers = struct { pub fn add(self: *Headers, key: []const u8, value: []const u8) HTTPError!void { assert(std.mem.indexOfScalar(u8, key, ':') == null); - if (self.map.count() == self.num_header_max) return HTTPError.TooManyHeaders; + if (self.map.count() == self.count) return HTTPError.TooManyHeaders; self.map.putAssumeCapacity(key, value); } - pub fn get(self: Headers, key: []const u8) ?[]const u8 { + pub fn get(self: *const Headers, key: []const u8) ?[]const u8 { return self.map.get(key); } diff --git a/src/http/lib.zig b/src/http/lib.zig index 97e43ac..e3e9b3a 100644 --- a/src/http/lib.zig +++ b/src/http/lib.zig @@ -4,12 +4,7 @@ pub const Request = @import("request.zig").Request; pub const Response = @import("response.zig").Response; pub const Mime = @import("mime.zig").Mime; pub const Date = @import("date.zig").Date; -pub const Route = @import("route.zig").Route; -pub const Router = @import("router.zig").Router; -pub const RouteHandlerFn = @import("route.zig").RouteHandlerFn; -pub const Context = @import("context.zig").Context; pub const Headers = @import("headers.zig").Headers; -pub const Protocol = @import("protocol.zig"); pub const Server = @import("server.zig").Server; diff --git a/src/http/protocol.zig b/src/http/protocol.zig deleted file mode 100644 index 23fab20..0000000 --- a/src/http/protocol.zig +++ /dev/null @@ -1,78 +0,0 @@ -const std = @import("std"); -const Job = @import("../core/lib.zig").Job; -const Capture = @import("routing_trie.zig").Capture; -const Query = @import("routing_trie.zig").Query; -const QueryMap = @import("routing_trie.zig").QueryMap; -const Request = @import("request.zig").Request; -const Response = @import("response.zig").Response; -const Stage = @import("stage.zig").Stage; -const Router = @import("router.zig").Router; - -pub const ProtocolConfig = struct { - router: *Router, - num_header_max: u32 = 32, - /// Maximum number of Captures in a Route - /// - /// Default: 8 - num_captures_max: u32 = 8, - /// Maximum number of Queries in a URL - /// - /// Default: 8 - num_queries_max: u32 = 8, - /// Maximum size (in bytes) of the Request. - /// - /// Default: 2MB. - size_request_max: u32 = 1024 * 1024 * 2, - /// Maximum size (in bytes) of the Request URI. - /// - /// Default: 2KB. 
- size_request_uri_max: u32 = 1024 * 2, -}; - -pub const ProtocolData = struct { - captures: []Capture, - queries: QueryMap, - request: Request, - response: Response, - stage: Stage, - - pub fn init(allocator: std.mem.Allocator, config: *const ProtocolConfig) ProtocolData { - var queries = QueryMap.init(allocator); - queries.ensureTotalCapacity(config.num_queries_max) catch unreachable; - - return ProtocolData{ - .stage = .header, - .captures = allocator.alloc(Capture, config.num_captures_max) catch unreachable, - .queries = queries, - .request = Request.init(allocator, .{ - .num_header_max = config.num_header_max, - .size_request_max = config.size_request_max, - .size_request_uri_max = config.size_request_uri_max, - }) catch unreachable, - .response = Response.init(allocator, .{ - .num_headers_max = config.num_header_max, - }) catch unreachable, - }; - } - - pub fn deinit(self: *ProtocolData, allocator: std.mem.Allocator) void { - self.request.deinit(); - self.response.deinit(); - self.queries.deinit(); - allocator.free(self.captures); - } - - pub fn clean(self: *ProtocolData) void { - self.response.clear(); - } -}; - -const testing = std.testing; - -test "ProtocolData deinit" { - const config: ProtocolConfig = .{ .router = undefined }; - var x = ProtocolData.init(testing.allocator, &config); - defer x.deinit(testing.allocator); - - try testing.expectEqual(x.stage, .header); -} diff --git a/src/http/provision.zig b/src/http/provision.zig new file mode 100644 index 0000000..c279f24 --- /dev/null +++ b/src/http/provision.zig @@ -0,0 +1,59 @@ +const std = @import("std"); + +const Job = @import("../core/job.zig").Job; +const Capture = @import("routing_trie.zig").Capture; +const QueryMap = @import("routing_trie.zig").QueryMap; +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const Stage = @import("stage.zig").Stage; +const ServerConfig = @import("server.zig").ServerConfig; + +pub const Provision = struct { + index: usize, + job: Job, + socket: std.posix.socket_t, + buffer: []u8, + recv_buffer: std.ArrayList(u8), + arena: std.heap.ArenaAllocator, + captures: []Capture, + queries: QueryMap, + request: Request, + response: Response, + stage: Stage, + + pub fn init_hook(provisions: []Provision, config: ServerConfig) void { + for (provisions) |*provision| { + provision.job = .empty; + provision.socket = undefined; + // Create Buffer + provision.buffer = config.allocator.alloc(u8, config.size_socket_buffer) catch { + @panic("attempting to statically allocate more memory than available. 
(Socket Buffer)"); + }; + // Create Recv Buffer + provision.recv_buffer = std.ArrayList(u8).init(config.allocator); + // Create the Context Arena + provision.arena = std.heap.ArenaAllocator.init(config.allocator); + + provision.stage = .header; + provision.captures = config.allocator.alloc(Capture, config.num_captures_max) catch unreachable; + + var queries = QueryMap.init(config.allocator); + queries.ensureTotalCapacity(config.num_queries_max) catch unreachable; + provision.queries = queries; + provision.request = Request.init(config.allocator, config.num_header_max) catch unreachable; + provision.response = Response.init(config.allocator, config.num_header_max) catch unreachable; + } + } + + pub fn deinit_hook(provisions: []Provision, allocator: std.mem.Allocator) void { + for (provisions) |*provision| { + allocator.free(provision.buffer); + provision.recv_buffer.deinit(); + provision.arena.deinit(); + provision.request.deinit(); + provision.response.deinit(); + provision.queries.deinit(); + allocator.free(provision.captures); + } + } +}; diff --git a/src/http/request.zig b/src/http/request.zig index d0963a8..b58c3c0 100644 --- a/src/http/request.zig +++ b/src/http/request.zig @@ -6,35 +6,21 @@ const Headers = @import("lib.zig").Headers; const HTTPError = @import("lib.zig").HTTPError; const Method = @import("lib.zig").Method; -const RequestOptions = struct { - size_request_max: u32, - size_request_uri_max: u32, - num_header_max: u32, -}; - pub const Request = struct { allocator: std.mem.Allocator, - size_request_max: u32, - size_request_uri_max: u32, method: Method, uri: []const u8, - version: std.http.Version, + version: std.http.Version = .@"HTTP/1.1", headers: Headers, body: []const u8, /// This is for constructing a Request. - pub fn init(allocator: std.mem.Allocator, options: RequestOptions) !Request { - // The request size needs to be larger than the max URI size. 
- assert(options.size_request_max > options.size_request_uri_max); - + pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Request { return Request{ .allocator = allocator, - .headers = try Headers.init(allocator, options.num_header_max), - .size_request_max = options.size_request_max, - .size_request_uri_max = options.size_request_uri_max, + .headers = try Headers.init(allocator, num_header_max), .method = undefined, .uri = undefined, - .version = undefined, .body = undefined, }; } @@ -43,7 +29,12 @@ pub const Request = struct { self.headers.deinit(); } - pub fn parse_headers(self: *Request, bytes: []const u8) HTTPError!void { + const RequestParseOptions = struct { + size_request_max: u32, + size_request_uri_max: u32, + }; + + pub fn parse_headers(self: *Request, bytes: []const u8, options: RequestParseOptions) HTTPError!void { self.headers.clear(); var total_size: u32 = 0; var lines = std.mem.tokenizeAny(u8, bytes, "\r\n"); @@ -56,7 +47,7 @@ pub const Request = struct { while (lines.next()) |line| { total_size += @intCast(line.len); - if (total_size > self.size_request_max) { + if (total_size > options.size_request_max) { return HTTPError.ContentTooLarge; } @@ -68,16 +59,14 @@ pub const Request = struct { log.warn("invalid method: {s}", .{method_string}); return HTTPError.InvalidMethod; }; - self.set_method(method); const uri_string = chunks.next() orelse return HTTPError.MalformedRequest; - if (uri_string.len >= self.size_request_uri_max) return HTTPError.URITooLong; + if (uri_string.len >= options.size_request_uri_max) return HTTPError.URITooLong; if (uri_string[0] != '/') return HTTPError.MalformedRequest; - self.set_uri(uri_string); const version_string = chunks.next() orelse return HTTPError.MalformedRequest; if (!std.mem.eql(u8, version_string, "HTTP/1.1")) return HTTPError.HTTPVersionNotSupported; - self.set_version(.@"HTTP/1.1"); + self.set(.{ .method = method, .uri = uri_string }); // There shouldn't be anything else. if (chunks.next() != null) return HTTPError.MalformedRequest; @@ -93,20 +82,24 @@ pub const Request = struct { } } - pub fn set_method(self: *Request, method: Method) void { - self.method = method; - } + pub const RequestSetOptions = struct { + method: ?Method = null, + uri: ?[]const u8 = null, + body: ?[]const u8 = null, + }; - pub fn set_uri(self: *Request, uri: []const u8) void { - self.uri = uri; - } + pub fn set(self: *Request, options: RequestSetOptions) void { + if (options.method) |method| { + self.method = method; + } - pub fn set_version(self: *Request, version: std.http.Version) void { - self.version = version; - } + if (options.uri) |uri| { + self.uri = uri; + } - pub fn set_body(self: *Request, body: []const u8) void { - self.body = body; + if (options.body) |body| { + self.body = body; + } } /// Should this specific Request expect to capture a body. 
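// Illustrative use of the reworked Request API, mirroring the tests below: the size limits
// now travel with the parse call instead of living on the Request itself.
//
//   var request = try Request.init(allocator, 32);
//   defer request.deinit();
//   try request.parse_headers(bytes, .{ .size_request_max = 1024, .size_request_uri_max = 256 });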
@@ -128,14 +121,13 @@ test "Parse Request" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + try request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 256, }); - defer request.deinit(); - - try request.parse_headers(request_text[0..]); try testing.expectEqual(.GET, request.method); try testing.expectEqualStrings("/", request.uri); @@ -155,14 +147,13 @@ test "Expect ContentTooLong Error" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 4096}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 128, .size_request_uri_max = 64, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.ContentTooLarge, err); } @@ -175,14 +166,13 @@ test "Expect URITooLong Error" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 4096}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024 * 1024, .size_request_uri_max = 2048, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.URITooLong, err); } @@ -195,14 +185,13 @@ test "Expect Malformed when URI missing /" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 256}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.MalformedRequest, err); } @@ -214,14 +203,13 @@ test "Expect Incorrect HTTP Version" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.HTTPVersionNotSupported, err); } @@ -233,13 +221,12 @@ test "Malformed Headers" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.MalformedRequest, err); } diff --git a/src/http/response.zig b/src/http/response.zig index e5e14e9..3335d9d 100644 --- a/src/http/response.zig +++ b/src/http/response.zig @@ -6,51 +6,22 @@ const Status = @import("lib.zig").Status; const Mime = @import("lib.zig").Mime; const Date = @import("lib.zig").Date; -const ResponseOptions = 
struct { - num_headers_max: u32, -}; - -const CachedDate = struct { - buffer: []u8, - ts: i64, - index: usize, -}; - pub const Response = struct { allocator: std.mem.Allocator, status: ?Status = null, mime: ?Mime = null, body: ?[]const u8 = null, headers: Headers, - cached_date: CachedDate, - pub fn init(allocator: std.mem.Allocator, options: ResponseOptions) !Response { + pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Response { return Response{ .allocator = allocator, - .headers = try Headers.init(allocator, options.num_headers_max), - .cached_date = CachedDate{ - .buffer = try allocator.alloc(u8, 32), - .index = 0, - .ts = 0, - }, + .headers = try Headers.init(allocator, num_header_max), }; } pub fn deinit(self: *Response) void { self.headers.deinit(); - self.allocator.free(self.cached_date.buffer); - } - - pub fn set_status(self: *Response, status: Status) void { - self.status = status; - } - - pub fn set_mime(self: *Response, mime: Mime) void { - self.mime = mime; - } - - pub fn set_body(self: *Response, body: []const u8) void { - self.body = body; } pub fn clear(self: *Response) void { @@ -59,7 +30,7 @@ pub const Response = struct { self.body = null; } - const ResponseSetOptions = struct { + pub const ResponseSetOptions = struct { status: ?Status = null, mime: ?Mime = null, body: ?[]const u8 = null, @@ -79,75 +50,66 @@ pub const Response = struct { } } - pub fn headers_into_buffer(self: *Response, buffer: []u8, content_length: u32) ![]u8 { - var stream = std.io.fixedBufferStream(buffer); - try self.write_headers(stream.writer(), content_length); - return stream.getWritten(); - } + pub fn headers_into_buffer(self: *Response, buffer: []u8, content_length: ?u32) ![]u8 { + var index: usize = 0; - fn write_headers(self: *Response, writer: anytype, content_length: u32) !void { // Status Line - try writer.writeAll("HTTP/1.1 "); + std.mem.copyForwards(u8, buffer[index..], "HTTP/1.1 "); + index += 9; if (self.status) |status| { - try std.fmt.formatInt(@intFromEnum(status), 10, .lower, .{}, writer); - try writer.writeAll(" "); - try writer.writeAll(@tagName(status)); + const status_code = @intFromEnum(status); + const code = try std.fmt.bufPrint(buffer[index..], "{d} ", .{status_code}); + index += code.len; + const status_name = @tagName(status); + std.mem.copyForwards(u8, buffer[index..], status_name); + index += status_name.len; } else { return error.MissingStatus; } - try writer.writeAll("\r\n"); - - // Standard Headers. - - // Cache the Date. - // Omits the Date header on any platform that doesn't support timestamp(). 
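// In the rewritten headers_into_buffer, this cached Date header goes away entirely: the
// status line, "Server: zzz", "Connection: keep-alive", the user headers, Content-Type,
// and an optional Content-Length are copied straight into the caller's buffer with a
// running index. Passing `null` for content_length (as the SSE path in Context.to_sse
// does) simply omits the Content-Length header.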
- const ts = std.time.timestamp(); - if (ts != 0) { - if (self.cached_date.ts != ts) { - const date = Date.init(ts).to_http_date(); - const buf = try date.into_buf(self.cached_date.buffer); - self.cached_date = .{ - .ts = ts, - .buffer = self.cached_date.buffer, - .index = buf.len, - }; - } - - assert(self.cached_date.index < self.cached_date.buffer.len); - try writer.writeAll("Date: "); - try writer.writeAll(self.cached_date.buffer[0..self.cached_date.index]); - try writer.writeAll("\r\n"); - } - - try writer.writeAll("Server: zzz\r\n"); - try writer.writeAll("Connection: keep-alive\r\n"); + std.mem.copyForwards(u8, buffer[index..], "\r\nServer: zzz\r\nConnection: keep-alive\r\n"); + index += 39; // Headers var iter = self.headers.map.iterator(); while (iter.next()) |entry| { - try writer.writeAll(entry.key_ptr.*); - try writer.writeAll(": "); - try writer.writeAll(entry.value_ptr.*); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], entry.key_ptr.*); + index += entry.key_ptr.len; + std.mem.copyForwards(u8, buffer[index..], ": "); + index += 2; + std.mem.copyForwards(u8, buffer[index..], entry.value_ptr.*); + index += entry.value_ptr.len; + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; } - // If we have an associated MIME type. + // Content-Type + std.mem.copyForwards(u8, buffer[index..], "Content-Type: "); + index += 14; if (self.mime) |m| { - try writer.writeAll("Content-Type: "); - try writer.writeAll(m.content_type); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], m.content_type); + index += m.content_type.len; } else { - // By default, we should just send as an octet-stream for safety. - try writer.writeAll("Content-Type: "); - try writer.writeAll(Mime.BIN.content_type); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], Mime.BIN.content_type); + index += Mime.BIN.content_type.len; + } + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; + + // Content-Length + if (content_length) |length| { + std.mem.copyForwards(u8, buffer[index..], "Content-Length: "); + index += 16; + const length_str = try std.fmt.bufPrint(buffer[index..], "{d}", .{length}); + index += length_str.len; + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; } - try writer.writeAll("Content-Length: "); - try std.fmt.formatInt(content_length, 10, .lower, .{}, writer); - try writer.writeAll("\r\n"); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; + + return buffer[0..index]; } }; diff --git a/src/http/route.zig b/src/http/route.zig index 7500848..26f41bc 100644 --- a/src/http/route.zig +++ b/src/http/route.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/route"); const Method = @import("method.zig").Method; const Request = @import("request.zig").Request; @@ -6,112 +7,125 @@ const Response = @import("response.zig").Response; const Context = @import("context.zig").Context; -pub const RouteHandlerFn = *const fn (request: Request, response: *Response, context: Context) void; - -pub const Route = struct { - handlers: [9]?RouteHandlerFn = [_]?RouteHandlerFn{null} ** 9, - - fn method_to_index(method: Method) u32 { - return switch (method) { - .GET => 0, - .HEAD => 1, - .POST => 2, - .PUT => 3, - .DELETE => 4, - .CONNECT => 5, - .OPTIONS => 6, - .TRACE => 7, - .PATCH => 8, - }; - } - - pub fn init() Route { - return Route{ .handlers = [_]?RouteHandlerFn{null} ** 9 }; - } - - /// Returns 
a comma delinated list of allowed Methods for this route. This - /// is meant to be used as the value for the 'Allow' header in the Response. - pub fn get_allowed(self: Route, allocator: std.mem.Allocator) ![]const u8 { - // This gets allocated within the context of the connection's arena. - const allowed_size = comptime blk: { - var size = 0; - for (std.meta.tags(Method)) |method| { - size += @tagName(method).len + 1; - } - break :blk size; +pub fn Route(comptime Server: type) type { + return struct { + const Self = @This(); + pub const HandlerFn = *const fn (context: *Context(Server), data: *anyopaque) anyerror!void; + fn TypedHandlerFn(comptime T: type) type { + return *const fn (context: *Context(Server), data: T) anyerror!void; + } + const HandlerWithData = struct { + handler: HandlerFn, + data: usize, }; - const buffer = try allocator.alloc(u8, allowed_size); + handlers: [9]?HandlerWithData = [_]?HandlerWithData{null} ** 9, + + fn method_to_index(method: Method) u32 { + return switch (method) { + .GET => 0, + .HEAD => 1, + .POST => 2, + .PUT => 3, + .DELETE => 4, + .CONNECT => 5, + .OPTIONS => 6, + .TRACE => 7, + .PATCH => 8, + }; + } + + pub fn init() Self { + return Self{ .handlers = [_]?HandlerWithData{null} ** 9 }; + } + + /// Returns a comma delinated list of allowed Methods for this route. This + /// is meant to be used as the value for the 'Allow' header in the Response. + pub fn get_allowed(self: Self, allocator: std.mem.Allocator) ![]const u8 { + // This gets allocated within the context of the connection's arena. + const allowed_size = comptime blk: { + var size = 0; + for (std.meta.tags(Method)) |method| { + size += @tagName(method).len + 1; + } + break :blk size; + }; + + const buffer = try allocator.alloc(u8, allowed_size); + + var current: []u8 = ""; + inline for (std.meta.tags(Method)) |method| { + if (self.handlers[@intFromEnum(method)] != null) { + current = std.fmt.bufPrint(buffer, "{s},{s}", .{ @tagName(method), current }) catch unreachable; + } + } - var current: []u8 = ""; - inline for (std.meta.tags(Method)) |method| { - if (self.handlers[@intFromEnum(method)] != null) { - current = std.fmt.bufPrint(buffer, "{s},{s}", .{ @tagName(method), current }) catch unreachable; + if (current.len == 0) { + return current; + } else { + return current[0 .. current.len - 1]; } } - if (current.len == 0) { - return current; - } else { - return current[0 .. current.len - 1]; + pub fn get_handler(self: Self, method: Method) ?HandlerWithData { + return self.handlers[method_to_index(method)]; + } + + inline fn inner_route( + comptime method: Method, + self: Self, + data: anytype, + handler_fn: TypedHandlerFn(@TypeOf(data)), + ) Self { + // You can either give a void (if you don't want to pass data through) or a pointer. 
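// Illustrative registration with the new data-carrying handlers (names here are made up):
//
//   var counter: usize = 0;
//   const route = Route.init().get(&counter, struct {
//       fn hits(ctx: *Context, count: *usize) !void {
//           count.* += 1;
//           try ctx.respond(.{ .status = .OK, .mime = Mime.HTML, .body = "ok" });
//       }
//   }.hits);
//
// Passing `{}` instead of a pointer keeps the old data-less style, as serve_embedded_file does.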
+ comptime assert(@typeInfo(@TypeOf(data)) == .Pointer or @typeInfo(@TypeOf(data)) == .Void); + const inner_data = switch (comptime @typeInfo(@TypeOf(data))) { + .Void => @intFromPtr(&data), + .Pointer => @intFromPtr(data), + else => unreachable, + }; + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(method)] = .{ + .handler = @ptrCast(handler_fn), + .data = inner_data, + }; + return Self{ .handlers = new_handlers }; + } + + pub fn get(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.GET, self, data, handler_fn); + } + + pub fn head(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.HEAD, self, data, handler_fn); + } + + pub fn post(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.POST, self, data, handler_fn); + } + + pub fn put(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.PUT, self, data, handler_fn); + } + + pub fn delete(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.DELETE, self, data, handler_fn); + } + + pub fn connect(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.CONNECT, self, data, handler_fn); + } + + pub fn options(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.OPTIONS, self, data, handler_fn); + } + + pub fn trace(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.TRACE, self, data, handler_fn); + } + + pub fn patch(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.PATCH, self, data, handler_fn); } - } - - pub fn get_handler(self: Route, method: Method) ?RouteHandlerFn { - return self.handlers[method_to_index(method)]; - } - - pub fn get(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.GET)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn head(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.HEAD)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn post(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.POST)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn put(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PUT)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn delete(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.DELETE)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn connect(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.CONNECT)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn options(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.OPTIONS)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn trace(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - 
new_handlers[comptime method_to_index(.TRACE)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn patch(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PATCH)] = handler_fn; - return Route{ .handlers = new_handlers }; - } -}; + }; +} diff --git a/src/http/router.zig b/src/http/router.zig index 515f465..c1f59da 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -2,149 +2,240 @@ const std = @import("std"); const builtin = @import("builtin"); const log = std.log.scoped(.@"zzz/http/router"); const assert = std.debug.assert; -const Route = @import("route.zig").Route; + +const _Route = @import("route.zig").Route; + const Capture = @import("routing_trie.zig").Capture; -const FoundRoute = @import("routing_trie.zig").FoundRoute; const Request = @import("request.zig").Request; const Response = @import("response.zig").Response; const Mime = @import("mime.zig").Mime; -const Context = @import("context.zig").Context; +const _Context = @import("context.zig").Context; -const RoutingTrie = @import("routing_trie.zig").RoutingTrie; +const _RoutingTrie = @import("routing_trie.zig").RoutingTrie; const QueryMap = @import("routing_trie.zig").QueryMap; -pub const Router = struct { - allocator: std.mem.Allocator, - routes: RoutingTrie, - /// This makes the router immutable, also making it - /// thread-safe when shared. - locked: bool = false, - - pub fn init(allocator: std.mem.Allocator) Router { - const routes = RoutingTrie.init(allocator) catch unreachable; - return Router{ .allocator = allocator, .routes = routes, .locked = false }; - } - - pub fn deinit(self: *Router) void { - self.routes.deinit(); - } - - pub fn serve_fs_dir(self: *Router, comptime url_path: []const u8, comptime dir_path: []const u8) !void { - assert(!self.locked); - - const route = Route.init().get(struct { - pub fn handler_fn(request: Request, response: *Response, context: Context) void { - _ = request; - - const search_path = context.captures[0].remaining; - const file_path = std.fmt.allocPrint(context.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { - response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - return; - }; - - const extension_start = std.mem.lastIndexOfScalar(u8, search_path, '.'); - const mime: Mime = blk: { - if (extension_start) |start| { - break :blk Mime.from_extension(search_path[start..]); - } else { - break :blk Mime.HTML; - } - }; - - const file: std.fs.File = std.fs.cwd().openFile(file_path, .{}) catch { - response.set(.{ - .status = .@"Not Found", - .mime = Mime.HTML, - .body = "File Not Found", - }); - return; - }; - defer file.close(); - - const file_bytes = file.readToEndAlloc(context.allocator, 1024 * 1024 * 4) catch { - response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "File Too Large", - }); - return; - }; - - response.set(.{ - .status = .OK, - .mime = mime, - .body = file_bytes, +const Runtime = @import("tardy").Runtime; +const Task = @import("tardy").Task; + +pub fn Router(comptime Server: type) type { + return struct { + const Self = @This(); + const RoutingTrie = _RoutingTrie(Server); + const FoundRoute = RoutingTrie.FoundRoute; + const Route = _Route(Server); + const Context = _Context(Server); + allocator: std.mem.Allocator, + routes: RoutingTrie, + /// This makes the router immutable, also making it + /// thread-safe when shared. 
+ locked: bool = false, + + pub fn init(allocator: std.mem.Allocator) Self { + const routes = RoutingTrie.init(allocator) catch unreachable; + return Self{ .allocator = allocator, .routes = routes, .locked = false }; + } + + pub fn deinit(self: *Self) void { + self.routes.deinit(); + } + + const FileProvision = struct { + mime: Mime, + context: *Context, + fd: std.posix.fd_t, + offset: usize, + list: std.ArrayList(u8), + buffer: []u8, + }; + + fn open_file_task(rt: *Runtime, fd: std.posix.fd_t, provision: *FileProvision) !void { + errdefer provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }) catch unreachable; + + if (fd <= -1) { + try provision.context.respond(.{ + .status = .@"Not Found", + .mime = Mime.HTML, + .body = "File Not Found", }); + return; + } + provision.fd = fd; + + try rt.fs.read( + provision, + read_file_task, + fd, + provision.buffer, + 0, + ); + } + + fn read_file_task(rt: *Runtime, result: i32, provision: *FileProvision) !void { + errdefer provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }) catch unreachable; + + if (result <= 0) { + // If we are done reading... + try rt.fs.close( + provision, + close_file_task, + provision.fd, + ); + return; } - }.handler_fn); - - const url_with_match_all = comptime std.fmt.comptimePrint( - "{s}/%r", - .{std.mem.trimRight(u8, url_path, &.{'/'})}, - ); - - try self.serve_route(url_with_match_all, route); - } - - pub fn serve_embedded_file( - self: *Router, - comptime path: []const u8, - comptime mime: ?Mime, - comptime bytes: []const u8, - ) !void { - assert(!self.locked); - const route = Route.init().get(struct { - pub fn handler_fn(request: Request, response: *Response, _: Context) void { - response.set(.{ - .status = .OK, - .mime = mime, - .body = bytes, - }); - if (comptime builtin.mode == .Debug) { - // Don't Cache in Debug. - response.headers.add( - "Cache-Control", - "no-cache", - ) catch unreachable; - } else { - // Cache for 30 days. - response.headers.add( - "Cache-Control", - comptime std.fmt.comptimePrint("max-age={d}", .{60 * 60 * 24 * 30}), - ) catch unreachable; + const length: usize = @intCast(result); + + try provision.list.appendSlice(provision.buffer[0..length]); + + // TODO: This needs to be a setting you pass in to the router. + // + //if (provision.list.items.len > 1024 * 1024 * 4) { + // provision.context.respond(.{ + // .status = .@"Content Too Large", + // .mime = Mime.HTML, + // .body = "File Too Large", + // }); + // return; + //} + + provision.offset += length; + + try rt.fs.read( + provision, + read_file_task, + provision.fd, + provision.buffer, + provision.offset, + ); + } + + fn close_file_task(_: *Runtime, _: void, provision: *FileProvision) !void { + try provision.context.respond(.{ + .status = .OK, + .mime = provision.mime, + .body = provision.list.items[0..], + }); + } + + pub fn serve_fs_dir(self: *Self, comptime url_path: []const u8, comptime dir_path: []const u8) !void { + assert(!self.locked); + + const route = Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + const search_path = ctx.captures[0].remaining; + + const file_path = try std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }); + + // TODO: Ensure that paths cannot go out of scope and reference data that they shouldn't be allowed to. + // Very important. 
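// From here the file is served fully asynchronously: open_file_task receives the fd (or
// responds with 404 Not Found on failure), read_file_task keeps appending buffer-sized
// chunks into provision.list until the read returns zero, and close_file_task finally
// responds with the accumulated bytes and the mime type resolved below.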
+ + const extension_start = std.mem.lastIndexOfScalar(u8, search_path, '.'); + const mime: Mime = blk: { + if (extension_start) |start| { + break :blk Mime.from_extension(search_path[start..]); + } else { + break :blk Mime.BIN; + } + }; + + const provision = try ctx.allocator.create(FileProvision); + + provision.* = .{ + .mime = mime, + .context = ctx, + .fd = -1, + .offset = 0, + .list = std.ArrayList(u8).init(ctx.allocator), + .buffer = ctx.provision.buffer, + }; + + // We also need to support chunked encoding. + // It makes a lot more sense for files atleast. + try ctx.runtime.fs.open( + provision, + open_file_task, + file_path, + ); } + }.handler_fn); + + const url_with_match_all = comptime std.fmt.comptimePrint( + "{s}/%r", + .{std.mem.trimRight(u8, url_path, &.{'/'})}, + ); + + try self.serve_route(url_with_match_all, route); + } + + pub fn serve_embedded_file( + self: *Self, + comptime path: []const u8, + comptime mime: ?Mime, + comptime bytes: []const u8, + ) !void { + assert(!self.locked); + const route = Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { + if (comptime builtin.mode == .Debug) { + // Don't Cache in Debug. + try ctx.response.headers.add( + "Cache-Control", + "no-cache", + ); + } else { + // Cache for 30 days. + try ctx.response.headers.add( + "Cache-Control", + comptime std.fmt.comptimePrint("max-age={d}", .{std.time.s_per_day * 30}), + ); + } - // If our static item is greater than 1KB, - // it might be more beneficial to using caching. - if (comptime bytes.len > 1024) { - @setEvalBranchQuota(1_000_000); - const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); - response.headers.add("ETag", etag[0..]) catch unreachable; - - if (request.headers.get("If-None-Match")) |match| { - if (std.mem.eql(u8, etag, match)) { - response.set_status(.@"Not Modified"); - response.set_body(""); + // If our static item is greater than 1KB, + // it might be more beneficial to using caching. 
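// The comptime Wyhash digest of the embedded bytes doubles as a strong ETag: when the
// client echoes it back via If-None-Match, the handler can short-circuit to a
// 304 Not Modified with an empty body instead of resending the asset.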
+ if (comptime bytes.len > 1024) { + @setEvalBranchQuota(1_000_000); + const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); + try ctx.response.headers.add("ETag", etag[0..]); + + if (ctx.request.headers.get("If-None-Match")) |match| { + if (std.mem.eql(u8, etag, match)) { + try ctx.respond(.{ + .status = .@"Not Modified", + .mime = Mime.HTML, + .body = "", + }); + + return; + } } } + + try ctx.respond(.{ + .status = .OK, + .mime = mime, + .body = bytes, + }); } - } - }.handler_fn); + }.handler_fn); - try self.serve_route(path, route); - } + try self.serve_route(path, route); + } - pub fn serve_route(self: *Router, path: []const u8, route: Route) !void { - assert(!self.locked); - try self.routes.add_route(path, route); - } + pub fn serve_route(self: *Self, path: []const u8, route: Route) !void { + assert(!self.locked); + try self.routes.add_route(path, route); + } - pub fn get_route_from_host(self: Router, host: []const u8, captures: []Capture, queries: *QueryMap) ?FoundRoute { - return self.routes.get_route(host, captures, queries); - } -}; + pub fn get_route_from_host(self: Self, host: []const u8, captures: []Capture, queries: *QueryMap) ?FoundRoute { + return self.routes.get_route(host, captures, queries); + } + }; +} diff --git a/src/http/routing_trie.zig b/src/http/routing_trie.zig index 62ba338..d2a9033 100644 --- a/src/http/routing_trie.zig +++ b/src/http/routing_trie.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/routing_trie"); const CaseStringMap = @import("case_string_map.zig").CaseStringMap; -const Route = @import("lib.zig").Route; +const _Route = @import("route.zig").Route; fn TokenHashMap(comptime V: type) type { return std.HashMap(Token, V, struct { @@ -110,429 +110,433 @@ pub const Capture = union(TokenMatch) { remaining: TokenMatch.remaining.as_type(), }; -pub const FoundRoute = struct { - route: Route, - captures: []Capture, - queries: *QueryMap, -}; - // This RoutingTrie is deleteless. It only can create new routes or update existing ones. 
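// Route patterns mix literal fragments with typed captures: %i and %d match a signed
// integer, %u an unsigned integer, %f a float, %s any string chunk, and %r swallows the
// remainder of the path (the trailing "/%r" that serve_fs_dir appends). Anything after '?'
// is parsed separately into the QueryMap; "/item/%i/description", for example, yields one
// signed capture.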
-pub const RoutingTrie = struct { - pub const Node = struct { - allocator: std.mem.Allocator, - token: Token, - route: ?Route = null, - children: TokenHashMap(*Node), - - pub fn init(allocator: std.mem.Allocator, token: Token, route: ?Route) !*Node { - const node_ptr: *Node = try allocator.create(Node); - node_ptr.* = Node{ - .allocator = allocator, - .token = token, - .route = route, - .children = TokenHashMap(*Node).init(allocator), - }; +pub fn RoutingTrie(comptime Server: type) type { + return struct { + const Self = @This(); + const Route = _Route(Server); + + pub const FoundRoute = struct { + route: Route, + captures: []Capture, + queries: *QueryMap, + }; + pub const Node = struct { + allocator: std.mem.Allocator, + token: Token, + route: ?Route = null, + children: TokenHashMap(*Node), + + pub fn init(allocator: std.mem.Allocator, token: Token, route: ?Route) !*Node { + const node_ptr: *Node = try allocator.create(Node); + node_ptr.* = Node{ + .allocator = allocator, + .token = token, + .route = route, + .children = TokenHashMap(*Node).init(allocator), + }; + + return node_ptr; + } - return node_ptr; - } + pub fn deinit(self: *Node) void { + var iter = self.children.valueIterator(); - pub fn deinit(self: *Node) void { - var iter = self.children.valueIterator(); + while (iter.next()) |node| { + node.*.deinit(); + } - while (iter.next()) |node| { - node.*.deinit(); + self.children.deinit(); + self.allocator.destroy(self); } - - self.children.deinit(); - self.allocator.destroy(self); - } - }; - - allocator: std.mem.Allocator, - root: *Node, - - pub fn init(allocator: std.mem.Allocator) !RoutingTrie { - return RoutingTrie{ - .allocator = allocator, - .root = try Node.init( - allocator, - Token{ .fragment = "" }, - Route.init(), - ), }; - } - pub fn deinit(self: *RoutingTrie) void { - self.root.deinit(); - } + allocator: std.mem.Allocator, + root: *Node, - fn print_node(root: *Node) void { - var iter = root.children.iterator(); + pub fn init(allocator: std.mem.Allocator) !Self { + return Self{ + .allocator = allocator, + .root = try Node.init( + allocator, + Token{ .fragment = "" }, + Route.init(), + ), + }; + } - while (iter.next()) |entry| { - const node_ptr = entry.value_ptr.*; - std.io.getStdOut().writer().print( - "Token: {any}\n", - .{node_ptr.token}, - ) catch return; - print_node(entry.value_ptr.*); + pub fn deinit(self: *Self) void { + self.root.deinit(); } - } - fn print(self: *RoutingTrie) void { - print_node(self.root); - } + fn print_node(root: *Node) void { + var iter = root.children.iterator(); - pub fn add_route(self: *RoutingTrie, path: []const u8, route: Route) !void { - // This is where we will parse out the path. 
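// (This deleted add_route and the re-indented version added below are logically identical:
// the path is split on '/', each chunk becomes a Token node created lazily in the trie,
// and the Route is attached to the final node.)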
- var iter = std.mem.tokenizeScalar(u8, path, '/'); - - var current = self.root; - while (iter.next()) |chunk| { - const token: Token = Token.parse_chunk(chunk); - if (current.children.get(token)) |child| { - current = child; - } else { - try current.children.put( - token, - try Node.init(self.allocator, token, null), - ); - - current = current.children.get(token).?; + while (iter.next()) |entry| { + const node_ptr = entry.value_ptr.*; + std.io.getStdOut().writer().print( + "Token: {any}\n", + .{node_ptr.token}, + ) catch return; + print_node(entry.value_ptr.*); } } - current.route = route; - } - - pub fn get_route( - self: RoutingTrie, - path: []const u8, - captures: []Capture, - queries: *QueryMap, - ) ?FoundRoute { - var capture_idx: usize = 0; - - queries.clearRetainingCapacity(); - - const query_pos = std.mem.indexOfScalar(u8, path, '?'); - var iter = std.mem.tokenizeScalar(u8, path[0..(query_pos orelse path.len)], '/'); - var current = self.root; - - slash_loop: while (iter.next()) |chunk| { - const fragment = Token{ .fragment = chunk }; + fn print(self: *Self) void { + print_node(self.root); + } - // If it is the fragment, match it here. - if (current.children.get(fragment)) |child| { - current = child; - continue; - } + pub fn add_route(self: *Self, path: []const u8, route: Route) !void { + // This is where we will parse out the path. + var iter = std.mem.tokenizeScalar(u8, path, '/'); - var matched = false; - for (std.meta.tags(TokenMatch)) |token_type| { - const token = Token{ .match = token_type }; + var current = self.root; + while (iter.next()) |chunk| { + const token: Token = Token.parse_chunk(chunk); if (current.children.get(token)) |child| { - matched = true; - switch (token_type) { - .signed => if (std.fmt.parseInt(i64, chunk, 10)) |value| { - captures[capture_idx] = Capture{ .signed = value }; - } else |_| continue, - .unsigned => if (std.fmt.parseInt(u64, chunk, 10)) |value| { - captures[capture_idx] = Capture{ .unsigned = value }; - } else |_| continue, - .float => if (std.fmt.parseFloat(f64, chunk)) |value| { - captures[capture_idx] = Capture{ .float = value }; - } else |_| continue, - .string => captures[capture_idx] = Capture{ .string = chunk }, - // This ends the matching sequence and claims everything. - // Does not match the query statement! - .remaining => { - const rest = iter.buffer[(iter.index - chunk.len)..]; - captures[capture_idx] = Capture{ .remaining = rest }; - current.route = child.route.?; - capture_idx += 1; - break :slash_loop; - }, - } - current = child; - capture_idx += 1; - - if (capture_idx > captures.len) { - // Should return an error here but for now, - // itll just be a null. - return null; - } + } else { + try current.children.put( + token, + try Node.init(self.allocator, token, null), + ); - break; + current = current.children.get(token).?; } } - // If we failed to match, - // this is an invalid route. 
- if (!matched) { - return null; - } + current.route = route; } - if (query_pos) |pos| { - if (path.len > pos + 1) { - var query_iter = std.mem.tokenizeScalar(u8, path[pos + 1 ..], '&'); - - while (query_iter.next()) |chunk| { - if (queries.count() >= queries.capacity() / 2) { - return null; - } + pub fn get_route( + self: Self, + path: []const u8, + captures: []Capture, + queries: *QueryMap, + ) ?FoundRoute { + var capture_idx: usize = 0; - const field_idx = std.mem.indexOfScalar(u8, chunk, '=') orelse break; - if (chunk.len < field_idx + 1) break; + queries.clearRetainingCapacity(); - const key = chunk[0..field_idx]; - const value = chunk[(field_idx + 1)..]; + const query_pos = std.mem.indexOfScalar(u8, path, '?'); + var iter = std.mem.tokenizeScalar(u8, path[0..(query_pos orelse path.len)], '/'); + var current = self.root; - assert(std.mem.indexOfScalar(u8, key, '=') == null); - assert(std.mem.indexOfScalar(u8, value, '=') == null); + slash_loop: while (iter.next()) |chunk| { + const fragment = Token{ .fragment = chunk }; - queries.putAssumeCapacity(key, value); + // If it is the fragment, match it here. + if (current.children.get(fragment)) |child| { + current = child; + continue; } - } - } - const route = current.route orelse return null; - return FoundRoute{ - .route = route, - .captures = captures[0..capture_idx], - .queries = queries, - }; - } -}; + var matched = false; + for (std.meta.tags(TokenMatch)) |token_type| { + const token = Token{ .match = token_type }; + if (current.children.get(token)) |child| { + matched = true; + switch (token_type) { + .signed => if (std.fmt.parseInt(i64, chunk, 10)) |value| { + captures[capture_idx] = Capture{ .signed = value }; + } else |_| continue, + .unsigned => if (std.fmt.parseInt(u64, chunk, 10)) |value| { + captures[capture_idx] = Capture{ .unsigned = value }; + } else |_| continue, + .float => if (std.fmt.parseFloat(f64, chunk)) |value| { + captures[capture_idx] = Capture{ .float = value }; + } else |_| continue, + .string => captures[capture_idx] = Capture{ .string = chunk }, + // This ends the matching sequence and claims everything. + // Does not match the query statement! + .remaining => { + const rest = iter.buffer[(iter.index - chunk.len)..]; + captures[capture_idx] = Capture{ .remaining = rest }; + current.route = child.route.?; + capture_idx += 1; + break :slash_loop; + }, + } -const testing = std.testing; + current = child; + capture_idx += 1; -test "Chunk Parsing (Fragment)" { - const chunk = "thisIsAFragment"; - const token: Token = Token.parse_chunk(chunk); + if (capture_idx > captures.len) { + // Should return an error here but for now, + // itll just be a null. + return null; + } - switch (token) { - .fragment => |inner| try testing.expectEqualStrings(chunk, inner), - .match => return error.IncorrectTokenParsing, - } -} + break; + } + } -test "Chunk Parsing (Match)" { - const chunks: [5][]const u8 = .{ - "%i", - "%d", - "%u", - "%f", - "%s", - }; + // If we failed to match, + // this is an invalid route. 
+ if (!matched) { + return null; + } + } - const matches = [_]TokenMatch{ - TokenMatch.signed, - TokenMatch.signed, - TokenMatch.unsigned, - TokenMatch.float, - TokenMatch.string, - }; + if (query_pos) |pos| { + if (path.len > pos + 1) { + var query_iter = std.mem.tokenizeScalar(u8, path[pos + 1 ..], '&'); - for (chunks, matches) |chunk, match| { - const token: Token = Token.parse_chunk(chunk); + while (query_iter.next()) |chunk| { + if (queries.count() >= queries.capacity() / 2) { + return null; + } - switch (token) { - .fragment => return error.IncorrectTokenParsing, - .match => |inner| try testing.expectEqual(match, inner), - } - } -} + const field_idx = std.mem.indexOfScalar(u8, chunk, '=') orelse break; + if (chunk.len < field_idx + 1) break; -test "Path Parsing (Mixed)" { - const path = "/item/%i/description"; + const key = chunk[0..field_idx]; + const value = chunk[(field_idx + 1)..]; - const parsed: [3]Token = .{ - .{ .fragment = "item" }, - .{ .match = .signed }, - .{ .fragment = "description" }, - }; + assert(std.mem.indexOfScalar(u8, key, '=') == null); + assert(std.mem.indexOfScalar(u8, value, '=') == null); - var iter = std.mem.tokenizeScalar(u8, path, '/'); + queries.putAssumeCapacity(key, value); + } + } + } - for (parsed) |expected| { - const token = Token.parse_chunk(iter.next().?); - switch (token) { - .fragment => |inner| try testing.expectEqualStrings(expected.fragment, inner), - .match => |inner| try testing.expectEqual(expected.match, inner), + const route = current.route orelse return null; + return FoundRoute{ + .route = route, + .captures = captures[0..capture_idx], + .queries = queries, + }; } - } -} - -test "Custom Hashing" { - var s = TokenHashMap(bool).init(testing.allocator); - { - try s.put(.{ .fragment = "item" }, true); - try s.put(.{ .fragment = "thisisfalse" }, false); - - const state = s.get(.{ .fragment = "item" }).?; - try testing.expect(state); - - const should_be_false = s.get(.{ .fragment = "thisisfalse" }).?; - try testing.expect(!should_be_false); - } - - { - try s.put(.{ .match = .unsigned }, true); - try s.put(.{ .match = .float }, false); - try s.put(.{ .match = .string }, false); - - const state = s.get(.{ .match = .unsigned }).?; - try testing.expect(state); - - const should_be_false = s.get(.{ .match = .float }).?; - try testing.expect(!should_be_false); - - const string_state = s.get(.{ .match = .string }).?; - try testing.expect(!string_state); - } - - defer s.deinit(); -} - -test "Constructing Routing from Path" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%i/description", Route.init()); - try s.add_route("/item/%i/hello", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%s", Route.init()); - try s.add_route("/item/list", Route.init()); - - try testing.expectEqual(1, s.root.children.count()); -} - -test "Routing with Paths" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%i/description", Route.init()); - try s.add_route("/item/%i/hello", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%s", Route.init()); - try s.add_route("/item/list", Route.init()); - - try testing.expectEqual(null, 
s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; - - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].string); - } - - { - const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; - - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - } -} - -test "Routing with Remaining" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%r", Route.init()); - try s.add_route("/item/%i/price/%f", Route.init()); - - try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); - } - { - const captured = s.get_route("/item/name/THIS/IS/A/FILE/SYSTEM/PATH.html", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("THIS/IS/A/FILE/SYSTEM/PATH.html", captured.captures[0].remaining); - } - - { - const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - } - - { - const captured = s.get_route("/item/100/price/283.21", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(100, captured.captures[0].signed); - try testing.expectEqual(283.21, captured.captures[1].float); - } + }; } -test "Routing with Queries" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%r", Route.init()); - try s.add_route("/item/%i/price/%f", Route.init()); - - try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO?name=muki&food=waffle", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); - try testing.expectEqual(2, q.count()); - try testing.expectEqualStrings("muki", q.get("name").?); - try testing.expectEqualStrings("waffle", q.get("food").?); - } - - { - // Purposefully bad format with no keys or values. - const captured = s.get_route("/item/2112.22121/price_float?", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - try testing.expectEqual(0, q.count()); - } - - { - // Purposefully bad format with incomplete key/value pair. 
- const captured = s.get_route("/item/100/price/283.21?help", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(100, captured.captures[0].signed); - try testing.expectEqual(283.21, captured.captures[1].float); - try testing.expectEqual(0, q.count()); - } +const testing = std.testing; - { - // Purposefully have too many queries. - const captured = s.get_route("/item/100/price/283.21?a=1&b=2&c=3&d=4&e=5&f=6&g=7&h=8&i=9&j=10&k=11", captures[0..], &q); - try testing.expectEqual(null, captured); - } -} +//test "Chunk Parsing (Fragment)" { +// const chunk = "thisIsAFragment"; +// const token: Token = Token.parse_chunk(chunk); +// +// switch (token) { +// .fragment => |inner| try testing.expectEqualStrings(chunk, inner), +// .match => return error.IncorrectTokenParsing, +// } +//} +// +//test "Chunk Parsing (Match)" { +// const chunks: [5][]const u8 = .{ +// "%i", +// "%d", +// "%u", +// "%f", +// "%s", +// }; +// +// const matches = [_]TokenMatch{ +// TokenMatch.signed, +// TokenMatch.signed, +// TokenMatch.unsigned, +// TokenMatch.float, +// TokenMatch.string, +// }; +// +// for (chunks, matches) |chunk, match| { +// const token: Token = Token.parse_chunk(chunk); +// +// switch (token) { +// .fragment => return error.IncorrectTokenParsing, +// .match => |inner| try testing.expectEqual(match, inner), +// } +// } +//} +// +//test "Path Parsing (Mixed)" { +// const path = "/item/%i/description"; +// +// const parsed: [3]Token = .{ +// .{ .fragment = "item" }, +// .{ .match = .signed }, +// .{ .fragment = "description" }, +// }; +// +// var iter = std.mem.tokenizeScalar(u8, path, '/'); +// +// for (parsed) |expected| { +// const token = Token.parse_chunk(iter.next().?); +// switch (token) { +// .fragment => |inner| try testing.expectEqualStrings(expected.fragment, inner), +// .match => |inner| try testing.expectEqual(expected.match, inner), +// } +// } +//} +// +//test "Custom Hashing" { +// var s = TokenHashMap(bool).init(testing.allocator); +// { +// try s.put(.{ .fragment = "item" }, true); +// try s.put(.{ .fragment = "thisisfalse" }, false); +// +// const state = s.get(.{ .fragment = "item" }).?; +// try testing.expect(state); +// +// const should_be_false = s.get(.{ .fragment = "thisisfalse" }).?; +// try testing.expect(!should_be_false); +// } +// +// { +// try s.put(.{ .match = .unsigned }, true); +// try s.put(.{ .match = .float }, false); +// try s.put(.{ .match = .string }, false); +// +// const state = s.get(.{ .match = .unsigned }).?; +// try testing.expect(state); +// +// const should_be_false = s.get(.{ .match = .float }).?; +// try testing.expect(!should_be_false); +// +// const string_state = s.get(.{ .match = .string }).?; +// try testing.expect(!string_state); +// } +// +// defer s.deinit(); +//} +// +//test "Constructing Routing from Path" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%i/description", Route.init()); +// try s.add_route("/item/%i/hello", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%s", Route.init()); +// try s.add_route("/item/list", Route.init()); +// +// try testing.expectEqual(1, s.root.children.count()); +//} +// +//test "Routing with Paths" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var 
captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%i/description", Route.init()); +// try s.add_route("/item/%i/hello", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%s", Route.init()); +// try s.add_route("/item/list", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; +// +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].string); +// } +// +// { +// const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; +// +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// } +//} +// +//test "Routing with Remaining" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%r", Route.init()); +// try s.add_route("/item/%i/price/%f", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); +// } +// { +// const captured = s.get_route("/item/name/THIS/IS/A/FILE/SYSTEM/PATH.html", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("THIS/IS/A/FILE/SYSTEM/PATH.html", captured.captures[0].remaining); +// } +// +// { +// const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// } +// +// { +// const captured = s.get_route("/item/100/price/283.21", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(100, captured.captures[0].signed); +// try testing.expectEqual(283.21, captured.captures[1].float); +// } +//} +// +//test "Routing with Queries" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%r", Route.init()); +// try s.add_route("/item/%i/price/%f", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO?name=muki&food=waffle", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); +// try testing.expectEqual(2, q.count()); +// try testing.expectEqualStrings("muki", q.get("name").?); +// try testing.expectEqualStrings("waffle", q.get("food").?); +// } +// +// { +// // 
Purposefully bad format with no keys or values. +// const captured = s.get_route("/item/2112.22121/price_float?", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// try testing.expectEqual(0, q.count()); +// } +// +// { +// // Purposefully bad format with incomplete key/value pair. +// const captured = s.get_route("/item/100/price/283.21?help", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(100, captured.captures[0].signed); +// try testing.expectEqual(283.21, captured.captures[1].float); +// try testing.expectEqual(0, q.count()); +// } +// +// { +// // Purposefully have too many queries. +// const captured = s.get_route("/item/100/price/283.21?a=1&b=2&c=3&d=4&e=5&f=6&g=7&h=8&i=9&j=10&k=11", captures[0..], &q); +// try testing.expectEqual(null, captured); +// } +//} diff --git a/src/http/server.zig b/src/http/server.zig index 61a78d0..97bffcf 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -1,326 +1,1025 @@ const std = @import("std"); - const builtin = @import("builtin"); +const tag = builtin.os.tag; const assert = std.debug.assert; -const panic = std.debug.panic; const log = std.log.scoped(.@"zzz/http/server"); -const Runtime = @import("tardy").Runtime; -const AsyncIOType = @import("tardy").AsyncIOType; -const Pool = @import("tardy").Pool; +const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice; -const Job = @import("../core/lib.zig").Job; -const Pseudoslice = @import("../core/lib.zig").Pseudoslice; +const TLSFileOptions = @import("../tls/lib.zig").TLSFileOptions; +const TLSContext = @import("../tls/lib.zig").TLSContext; +const TLS = @import("../tls/lib.zig").TLS; + +const _Context = @import("context.zig").Context; +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const Capture = @import("routing_trie.zig").Capture; +const QueryMap = @import("routing_trie.zig").QueryMap; +const ResponseSetOptions = Response.ResponseSetOptions; +const _SSE = @import("sse.zig").SSE; +const Provision = @import("provision.zig").Provision; +const Mime = @import("mime.zig").Mime; +const _Router = @import("router.zig").Router; +const _Route = @import("route.zig").Route; const HTTPError = @import("lib.zig").HTTPError; -const Request = @import("lib.zig").Request; -const Response = @import("lib.zig").Response; -const Mime = @import("lib.zig").Mime; -const Context = @import("lib.zig").Context; -const Router = @import("lib.zig").Router; -const Capture = @import("routing_trie.zig").Capture; -const ProtocolData = @import("protocol.zig").ProtocolData; -const ProtocolConfig = @import("protocol.zig").ProtocolConfig; -const Security = @import("../core/server.zig").Security; -const zzzConfig = @import("../core/server.zig").zzzConfig; -const Provision = @import("../core/zprovision.zig").ZProvision(ProtocolData); +const AfterType = @import("../core/job.zig").AfterType; + +const Pool = @import("tardy").Pool; +pub const Runtime = @import("tardy").Runtime; +pub const Task = @import("tardy").Task; +const TaskFn = @import("tardy").TaskFn; +pub const AsyncIOType = @import("tardy").AsyncIOType; +const TardyCreator = @import("tardy").Tardy; +const Cross = @import("tardy").Cross; -const RecvStatus = @import("../core/server.zig").RecvStatus; -const zzzServer = @import("../core/server.zig").Server; +pub const RecvStatus = union(enum) { + kill, + recv, + send: Pseudoslice, + spawned, +}; + +/// 
Security Model to use.
+///
+/// Default: .plain (plaintext)
+pub const Security = union(enum) {
+    plain,
+    tls: struct {
+        cert: TLSFileOptions,
+        key: TLSFileOptions,
+        cert_name: []const u8 = "CERTIFICATE",
+        key_name: []const u8 = "PRIVATE KEY",
+    },
+};
 
 /// Uses the current p.response to generate and queue up the sending
 /// of a response. This is used when we already know what we want to send.
 ///
 /// See: `route_and_respond`
-fn raw_respond(p: *Provision) !RecvStatus {
+pub inline fn raw_respond(p: *Provision) !RecvStatus {
     {
-        const status_code: u16 = if (p.data.response.status) |status| @intFromEnum(status) else 0;
-        const status_name = if (p.data.response.status) |status| @tagName(status) else "No Status";
+        const status_code: u16 = if (p.response.status) |status| @intFromEnum(status) else 0;
+        const status_name = if (p.response.status) |status| @tagName(status) else "No Status";
         log.info("{d} - {d} {s}", .{ p.index, status_code, status_name });
     }
 
-    const body = p.data.response.body orelse "";
-    const header_buffer = try p.data.response.headers_into_buffer(p.buffer, @intCast(body.len));
-    p.data.response.headers.clear();
+    const body = p.response.body orelse "";
+    const header_buffer = try p.response.headers_into_buffer(p.buffer, @intCast(body.len));
+    p.response.headers.clear();
 
     const pseudo = Pseudoslice.init(header_buffer, body, p.buffer);
     return .{ .send = pseudo };
 }
 
-fn route_and_respond(p: *Provision, router: *const Router) !RecvStatus {
-    route: {
-        const found = router.get_route_from_host(p.data.request.uri, p.data.captures, &p.data.queries);
-        if (found) |f| {
-            const handler = f.route.get_handler(p.data.request.method);
-
-            if (handler) |func| {
-                const context: Context = Context.init(
-                    p.arena.allocator(),
-                    p.data.request.uri,
-                    f.captures,
-                    f.queries,
-                );
+/// These are various general configuration
+/// options that are important for the actual framework.
+///
+/// This includes various different options and limits
+/// for interacting with the underlying network.
+pub const ServerConfig = struct {
+    /// The allocator that server will use.
+    allocator: std.mem.Allocator,
+    /// Kernel Backlog Value.
+    size_backlog: u31 = 512,
+    /// Number of Maximum Concurrent Connections.
+    ///
+    /// This is applied PER thread if using multi-threading.
+    /// zzz will drop/close any connections greater
+    /// than this.
+    ///
+    /// You want to tune this to your expected number
+    /// of maximum connections.
+    ///
+    /// Default: 1024
+    size_connections_max: u16 = 1024,
+    /// Maximum number of completions we can reap
+    /// with a single call of reap().
+    ///
+    /// Default: 256
+    size_completions_reap_max: u16 = 256,
+    /// Amount of allocated memory retained
+    /// after an arena is cleared.
+    ///
+    /// A higher value will increase memory usage but
+    /// should make allocators faster.
+    ///
+    /// A lower value will reduce memory usage but
+    /// will make allocators slower.
+    ///
+    /// Default: 1KB
+    size_connection_arena_retain: u32 = 1024,
+    /// Amount of space on the `recv_buffer` retained
+    /// after every send.
+    ///
+    /// Default: 1KB
+    size_recv_buffer_retain: u32 = 1024,
+    /// Size of the buffer (in bytes) used for
+    /// interacting with the socket.
+    ///
+    /// Default: 4 KB.
+    size_socket_buffer: u32 = 1024 * 4,
+    /// Maximum size (in bytes) of the Recv buffer.
+    /// This is mainly a concern when you are reading in
+    /// large requests before responding.
+    ///
+    /// Default: 2MB.
+ size_recv_buffer_max: u32 = 1024 * 1024 * 2, + /// Maximum number of Headers in a Request/Response + /// + /// Default: 32 + num_header_max: u32 = 32, + /// Maximum number of Captures in a Route + /// + /// Default: 8 + num_captures_max: u32 = 8, + /// Maximum number of Queries in a URL + /// + /// Default: 8 + num_queries_max: u32 = 8, + /// Maximum size (in bytes) of the Request. + /// + /// Default: 2MB. + size_request_max: u32 = 1024 * 1024 * 2, + /// Maximum size (in bytes) of the Request URI. + /// + /// Default: 2KB. + size_request_uri_max: u32 = 1024 * 2, +}; - @call(.auto, func, .{ p.data.request, &p.data.response, context }); - break :route; - } else { - // If we match the route but not the method. - p.data.response.set(.{ - .status = .@"Method Not Allowed", - .mime = Mime.HTML, - .body = "405 Method Not Allowed", - }); +pub fn Server(comptime security: Security) type { + const TLSContextType = comptime if (security == .tls) TLSContext else void; + const TLSType = comptime if (security == .tls) ?TLS else void; - // We also need to add to Allow header. - // This uses the connection's arena to allocate 64 bytes. - const allowed = f.route.get_allowed(p.arena.allocator()) catch { - p.data.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); + return struct { + const Self = @This(); + pub const Context = _Context(Self); + pub const Router = _Router(Self); + pub const Route = _Route(Self); + pub const SSE = _SSE(Self); + allocator: std.mem.Allocator, + config: ServerConfig, + addr: std.net.Address, + tls_ctx: TLSContextType, + router: *const Router, - break :route; - }; + pub fn init(config: ServerConfig) Self { + const tls_ctx = switch (comptime security) { + .tls => |inner| TLSContext.init(.{ + .allocator = config.allocator, + .cert = inner.cert, + .cert_name = inner.cert_name, + .key = inner.key, + .key_name = inner.key_name, + .size_tls_buffer_max = config.size_socket_buffer * 2, + }) catch unreachable, + .plain => void{}, + }; - p.data.response.headers.add("Allow", allowed) catch { - p.data.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); + return Self{ + .allocator = config.allocator, + .config = config, + .addr = undefined, + .tls_ctx = tls_ctx, + .router = undefined, + }; + } - break :route; - }; + pub fn deinit(self: *const Self) void { + if (comptime security == .tls) { + self.tls_ctx.deinit(); + } + } - break :route; + pub fn bind(self: *Self, host: []const u8, port: u16) !void { + assert(host.len > 0); + assert(port > 0); + + self.addr = blk: { + if (comptime tag.isDarwin() or tag.isBSD() or tag == .windows) { + break :blk try std.net.Address.parseIp(host, port); + } else { + break :blk try std.net.Address.resolveIp(host, port); + } + }; + } + + pub fn close_task(rt: *Runtime, _: void, provision: *Provision) !void { + assert(provision.job == .close); + const server_socket = rt.storage.get("__zzz_server_socket", std.posix.socket_t); + const pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); + + log.info("{d} - closing connection", .{provision.index}); + + if (comptime security == .tls) { + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + tls_ptr.*.?.deinit(); + tls_ptr.* = null; + } + + provision.socket = Cross.socket.INVALID_SOCKET; + provision.job = .empty; + _ = provision.arena.reset(.{ 
.retain_with_limit = config.size_connection_arena_retain }); + provision.response.clear(); + + if (provision.recv_buffer.items.len > config.size_recv_buffer_retain) { + provision.recv_buffer.shrinkRetainingCapacity(config.size_recv_buffer_retain); + } else { + provision.recv_buffer.clearRetainingCapacity(); + } + + pool.release(provision.index); + + const accept_queued = rt.storage.get_ptr("__zzz_accept_queued", bool); + if (!accept_queued.*) { + accept_queued.* = true; + try rt.net.accept( + server_socket, + accept_task, + server_socket, + ); } } - // Didn't match any route. - p.data.response.set(.{ - .status = .@"Not Found", - .mime = Mime.HTML, - .body = "404 Not Found", - }); - break :route; - } + fn accept_task(rt: *Runtime, child_socket: std.posix.socket_t, socket: std.posix.socket_t) !void { + const pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); + const accept_queued = rt.storage.get_ptr("__zzz_accept_queued", bool); + accept_queued.* = false; - if (p.data.response.status == .Kill) { - return .kill; - } + if (rt.scheduler.tasks.clean() >= 2) { + accept_queued.* = true; + try rt.net.accept(socket, accept_task, socket); + } - return try raw_respond(p); -} + if (!Cross.socket.is_valid(child_socket)) { + log.err("socket accept failed", .{}); + return error.AcceptFailed; + } -pub fn recv_fn( - rt: *Runtime, - provision: *Provision, - p_config: *const ProtocolConfig, - z_config: *const zzzConfig, - recv_buffer: []const u8, -) RecvStatus { - _ = rt; - _ = z_config; - - var stage = provision.data.stage; - const job = provision.job.recv; - - if (job.count >= p_config.size_request_max) { - provision.data.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); - - return raw_respond(provision) catch unreachable; - } + // This should never fail. It means that we have a dangling item. + assert(pool.clean() > 0); + const borrowed = pool.borrow() catch unreachable; + + log.info("{d} - accepting connection", .{borrowed.index}); + log.debug( + "empty provision slots: {d}", + .{pool.items.len - pool.dirty.count()}, + ); + assert(borrowed.item.job == .empty); + + try Cross.socket.disable_nagle(child_socket); + try Cross.socket.to_nonblock(child_socket); + + const provision = borrowed.item; + + // Store the index of this item. 
+ provision.index = @intCast(borrowed.index); + provision.socket = child_socket; + log.debug("provision buffer size: {d}", .{provision.buffer.len}); + + switch (comptime security) { + .tls => |_| { + const tls_ctx = rt.storage.get_const_ptr("__zzz_tls_ctx", TLSContextType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* == null); + + tls_ptr.* = tls_ctx.create(child_socket) catch |e| { + log.err("{d} - tls creation failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSCreationFailed; + }; + + const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { + log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSStartHandshakeFailed; + }; - switch (stage) { - .header => { - const start = provision.recv_buffer.items.len -| 4; - provision.recv_buffer.appendSlice(recv_buffer) catch unreachable; - const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); + provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; + try rt.net.recv(borrowed.item, handshake_task, child_socket, recv_buf); + }, + .plain => { + provision.job = .{ .recv = .{ .count = 0 } }; + try rt.net.recv(provision, recv_task, child_socket, provision.buffer); + }, + } + } + + fn recv_task(rt: *Runtime, length: i32, provision: *Provision) !void { + assert(provision.job == .recv); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); + const router = rt.storage.get_const_ptr("__zzz_router", Router); + + const recv_job = &provision.job.recv; - // Basically, this means we haven't finished processing the header. - if (header_ends == null) { - log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); - return .recv; + // If the socket is closed. + if (length <= 0) { + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return; } - log.debug("{d} - parsing header", .{provision.index}); - // The +4 is to account for the slice we match. - const header_end: u32 = @intCast(header_ends.? 
+ 4); - provision.data.request.parse_headers(provision.recv_buffer.items[0..header_end]) catch |e| { - switch (e) { - HTTPError.ContentTooLarge => { - provision.data.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); - }, - HTTPError.TooManyHeaders => { - provision.data.response.set(.{ - .status = .@"Request Header Fields Too Large", - .mime = Mime.HTML, - .body = "Too Many Headers", - }); - }, - HTTPError.MalformedRequest => { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Malformed Request", - }); + log.debug("{d} - recv triggered", .{provision.index}); + + const recv_count: usize = @intCast(length); + recv_job.count += recv_count; + const pre_recv_buffer = provision.buffer[0..recv_count]; + + const recv_buffer = blk: { + switch (comptime security) { + .tls => |_| { + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { + log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSDecryptFailed; + }; }, - HTTPError.URITooLong => { - provision.data.response.set(.{ - .status = .@"URI Too Long", - .mime = Mime.HTML, - .body = "URI Too Long", - }); + .plain => break :blk pre_recv_buffer, + } + }; + + const status = try on_recv(recv_buffer, rt, provision, router, config); + + switch (status) { + .spawned => return, + .kill => { + rt.stop(); + return error.Killed; + }, + .recv => { + try rt.net.recv( + provision, + recv_task, + provision.socket, + provision.buffer, + ); + }, + .send => |pslice| { + const first_buffer = try prepare_send(rt, provision, .recv, pslice); + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + first_buffer, + ); + }, + } + } + + fn handshake_task(rt: *Runtime, length: i32, provision: *Provision) !void { + assert(security == .tls); + if (comptime security == .tls) { + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + + assert(provision.job == .handshake); + const handshake_job = &provision.job.handshake; + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + log.debug("processing handshake", .{}); + handshake_job.count += 1; + + if (length <= 0) { + log.debug("handshake connection closed", .{}); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSHandshakeClosed; + } + + if (handshake_job.count >= 50) { + log.debug("handshake taken too many cycles", .{}); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSHandshakeTooManyCycles; + } + + const hs_length: usize = @intCast(length); + + const hstate = switch (handshake_job.state) { + .recv => tls_ptr.*.?.continue_handshake(.{ .recv = @intCast(hs_length) }), + .send => tls_ptr.*.?.continue_handshake(.{ .send = @intCast(hs_length) }), + } catch |e| { + log.err("{d} - tls handshake failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSHandshakeRecvFailed; + }; + + switch (hstate) { + .recv => |buf| { + log.debug("queueing recv in handshake", .{}); + handshake_job.state = .recv; + try rt.net.recv(provision, handshake_task, provision.socket, buf); }, - HTTPError.InvalidMethod => { - 
provision.data.response.set(.{ - .status = .@"Not Implemented", - .mime = Mime.HTML, - .body = "Not Implemented", - }); + .send => |buf| { + log.debug("queueing send in handshake", .{}); + handshake_job.state = .send; + try rt.net.send(provision, handshake_task, provision.socket, buf); }, - HTTPError.HTTPVersionNotSupported => { - provision.data.response.set(.{ - .status = .@"HTTP Version Not Supported", - .mime = Mime.HTML, - .body = "HTTP Version Not Supported", - }); + .complete => { + log.debug("handshake complete", .{}); + provision.job = .{ .recv = .{ .count = 0 } }; + try rt.net.recv(provision, recv_task, provision.socket, provision.buffer); }, } + } + } - return raw_respond(provision) catch unreachable; - }; + /// Prepares the provision send_job and returns the first send chunk + pub fn prepare_send(rt: *Runtime, provision: *Provision, after: AfterType, pslice: Pseudoslice) ![]const u8 { + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); + const plain_buffer = pslice.get(0, config.size_socket_buffer); - // Logging information about Request. - log.info("{d} - \"{s} {s}\" {s}", .{ - provision.index, - @tagName(provision.data.request.method), - provision.data.request.uri, - provision.data.request.headers.get("User-Agent") orelse "N/A", - }); - - // HTTP/1.1 REQUIRES a Host header to be present. - const is_http_1_1 = provision.data.request.version == .@"HTTP/1.1"; - const is_host_present = provision.data.request.headers.get("Host") != null; - if (is_http_1_1 and !is_host_present) { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Missing \"Host\" Header", - }); + switch (comptime security) { + .tls => { + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSEncryptFailed; + }; - return raw_respond(provision) catch unreachable; + provision.job = .{ + .send = .{ + .after = after, + .slice = pslice, + .count = @intCast(plain_buffer.len), + .security = .{ + .tls = .{ + .encrypted = encrypted_buffer, + .encrypted_count = 0, + }, + }, + }, + }; + + return encrypted_buffer; + }, + .plain => { + provision.job = .{ + .send = .{ + .after = after, + .slice = pslice, + .count = 0, + .security = .plain, + }, + }; + + return plain_buffer; + }, } + } - if (!provision.data.request.expect_body()) { - return route_and_respond(provision, p_config.router) catch unreachable; + pub const send_then_sse_task = send_then(struct { + fn inner(rt: *Runtime, success: bool, provision: *Provision) !void { + const send_job = provision.job.send; + assert(send_job.after == .sse); + const func: TaskFn(bool, *anyopaque) = @ptrCast(@alignCast(send_job.after.sse.func)); + const ctx: *anyopaque = @ptrCast(@alignCast(send_job.after.sse.ctx)); + try @call(.auto, func, .{ rt, success, ctx }); + + if (!success) { + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + } } + }.inner); - // Everything after here is a Request that is expecting a body. 
- const content_length = blk: { - const length_string = provision.data.request.headers.get("Content-Length") orelse { - break :blk 0; - }; + pub const send_then_recv_task = send_then(struct { + fn inner(rt: *Runtime, success: bool, provision: *Provision) !void { + if (!success) { + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return; + } - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); - return raw_respond(provision) catch unreachable; - }; - }; + log.debug("{d} - queueing a new recv", .{provision.index}); + _ = provision.arena.reset(.{ + .retain_with_limit = config.size_connection_arena_retain, + }); + provision.recv_buffer.clearRetainingCapacity(); + provision.job = .{ .recv = .{ .count = 0 } }; - if (header_end < provision.recv_buffer.items.len) { - const difference = provision.recv_buffer.items.len - header_end; - if (difference == content_length) { - // Whole Body - log.debug("{d} - got whole body with header", .{provision.index}); - const body_end = header_end + difference; - provision.data.request.set_body(provision.recv_buffer.items[header_end..body_end]); - return route_and_respond(provision, p_config.router) catch unreachable; - } else { - // Partial Body - log.debug("{d} - got partial body with header", .{provision.index}); - stage = .{ .body = header_end }; - return .recv; + try rt.net.recv( + provision, + recv_task, + provision.socket, + provision.buffer, + ); + } + }.inner); + + fn send_then(comptime func: TaskFn(bool, *Provision)) TaskFn(i32, *Provision) { + return struct { + fn send_then_inner(rt: *Runtime, length: i32, provision: *Provision) !void { + assert(provision.job == .send); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); + + // If the socket is closed. + if (length <= 0) { + try @call(.always_inline, func, .{ rt, false, provision }); + return; + } + + const send_job = &provision.job.send; + + log.debug("{d} - send triggered", .{provision.index}); + const send_count: usize = @intCast(length); + log.debug("{d} - sent length: {d}", .{ provision.index, send_count }); + + switch (comptime security) { + .tls => { + assert(send_job.security == .tls); + + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + + const job_tls = &send_job.security.tls; + job_tls.encrypted_count += send_count; + + if (job_tls.encrypted_count >= job_tls.encrypted.len) { + if (send_job.count >= send_job.slice.len) { + try @call(.always_inline, func, .{ rt, true, provision }); + } else { + // Queue a new chunk up for sending. 
+ log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); + + const inner_slice = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); + + send_job.count += @intCast(inner_slice.len); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSEncryptFailed; + }; + + job_tls.encrypted = encrypted; + job_tls.encrypted_count = 0; + + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + job_tls.encrypted, + ); + } + } else { + log.debug( + "{d} - sending next encrypted chunk starting at index {d}", + .{ provision.index, job_tls.encrypted_count }, + ); + + const remainder = job_tls.encrypted[job_tls.encrypted_count..]; + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + remainder, + ); + } + }, + .plain => { + assert(send_job.security == .plain); + send_job.count += send_count; + + if (send_job.count >= send_job.slice.len) { + try @call(.always_inline, func, .{ rt, true, provision }); + } else { + log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); + + const plain_buffer = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); + + log.debug("socket buffer size: {d}", .{config.size_socket_buffer}); + + log.debug("{d} - chunk ends at: {d}", .{ + provision.index, + plain_buffer.len + send_job.count, + }); + + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + plain_buffer, + ); + } + }, + } } - } else if (header_end == provision.recv_buffer.items.len) { - // Body of length 0 probably or only got header. - if (content_length == 0) { - log.debug("{d} - got body of length 0", .{provision.index}); - // Body of Length 0. - provision.data.request.set_body(""); - return route_and_respond(provision, p_config.router) catch unreachable; - } else { - // Got only header. - log.debug("{d} - got all header aka no body", .{provision.index}); - stage = .{ .body = header_end }; - return .recv; + }.send_then_inner; + } + + pub inline fn serve(self: *Self, router: *const Router, rt: *Runtime) !void { + self.router = router; + + log.info("server listening...", .{}); + log.info("security mode: {s}", .{@tagName(security)}); + + const socket = try self.create_socket(); + try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); + try std.posix.listen(socket, self.config.size_backlog); + + const provision_pool = try rt.allocator.create(Pool(Provision)); + provision_pool.* = try Pool(Provision).init( + rt.allocator, + self.config.size_connections_max, + self.config, + Provision.init_hook, + ); + + try rt.storage.store_ptr("__zzz_router", @constCast(router)); + try rt.storage.store_ptr("__zzz_provision_pool", provision_pool); + try rt.storage.store_alloc("__zzz_config", self.config); + + if (comptime security == .tls) { + const tls_slice = try rt.allocator.alloc( + TLSType, + self.config.size_connections_max, + ); + if (comptime security == .tls) { + for (tls_slice) |*tls| { + tls.* = null; + } } - } else unreachable; - }, - - .body => |header_end| { - // We should ONLY be here if we expect there to be a body. 
- assert(provision.data.request.expect_body()); - log.debug("{d} - body matching triggered", .{provision.index}); - - const content_length = blk: { - const length_string = provision.data.request.headers.get("Content-Length") orelse { - provision.data.response.set(.{ - .status = .@"Length Required", - .mime = Mime.HTML, - .body = "", - }); - return raw_respond(provision) catch unreachable; - }; + // since slices are fat pointers... + try rt.storage.store_alloc("__zzz_tls_slice", tls_slice); + try rt.storage.store_alloc("__zzz_tls_ctx", self.tls_ctx); + } - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); + try rt.storage.store_alloc("__zzz_server_socket", socket); + try rt.storage.store_alloc("__zzz_accept_queued", true); - return raw_respond(provision) catch unreachable; - }; + try rt.net.accept(socket, accept_task, socket); + } + + pub inline fn clean(rt: *Runtime) !void { + // clean up socket. + const server_socket = rt.storage.get("__zzz_server_socket", std.posix.socket_t); + std.posix.close(server_socket); + + // clean up provision pool. + const provision_pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); + provision_pool.deinit(rt.allocator, Provision.deinit_hook); + rt.allocator.destroy(provision_pool); + + // clean up TLS. + if (comptime security == .tls) { + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); + rt.allocator.free(tls_slice); + } + } + + fn create_socket(self: *const Self) !std.posix.socket_t { + const socket: std.posix.socket_t = blk: { + const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; + break :blk try std.posix.socket( + self.addr.any.family, + socket_flags, + std.posix.IPPROTO.TCP, + ); }; - const request_length = header_end + content_length; + log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); - // If this body will be too long, abort early. 
- if (request_length > p_config.size_request_max) { - provision.data.response.set(.{ - .status = .@"Content Too Large", + if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT_LB, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEADDR, + &std.mem.toBytes(@as(c_int, 1)), + ); + } + + return socket; + } + + fn route_and_respond(runtime: *Runtime, p: *Provision, router: *const Router) !RecvStatus { + route: { + const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); + if (found) |f| { + const handler = f.route.get_handler(p.request.method); + + if (handler) |h_with_data| { + const context: *Context = try p.arena.allocator().create(Context); + context.* = .{ + .allocator = p.arena.allocator(), + .runtime = runtime, + .request = &p.request, + .response = &p.response, + .path = p.request.uri, + .captures = f.captures, + .queries = f.queries, + .provision = p, + }; + + @call(.auto, h_with_data.handler, .{ + context, + @as(*anyopaque, @ptrFromInt(h_with_data.data)), + }) catch |e| { + log.err("\"{s}\" handler failed with error: {}", .{ p.request.uri, e }); + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + return try raw_respond(p); + }; + + return .spawned; + } else { + // If we match the route but not the method. + p.response.set(.{ + .status = .@"Method Not Allowed", + .mime = Mime.HTML, + .body = "405 Method Not Allowed", + }); + + // We also need to add to Allow header. + // This uses the connection's arena to allocate 64 bytes. + const allowed = f.route.get_allowed(p.arena.allocator()) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + p.response.headers.add("Allow", allowed) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + break :route; + } + } + + // Didn't match any route. 
+ p.response.set(.{ + .status = .@"Not Found", .mime = Mime.HTML, - .body = "", + .body = "404 Not Found", }); - return raw_respond(provision) catch unreachable; + break :route; } - if (job.count >= request_length) { - provision.data.request.set_body(provision.recv_buffer.items[header_end..request_length]); - return route_and_respond(provision, p_config.router) catch unreachable; - } else { - return .recv; + if (p.response.status == .Kill) { + return .kill; + } + + return try raw_respond(p); + } + + inline fn on_recv( + buffer: []const u8, + rt: *Runtime, + provision: *Provision, + router: *const Router, + config: *const ServerConfig, + ) !RecvStatus { + var stage = provision.stage; + const job = provision.job.recv; + + if (job.count >= config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + + return try raw_respond(provision); } - }, - } -} -pub fn Server(comptime security: Security, comptime async_type: AsyncIOType) type { - return zzzServer(security, async_type, ProtocolData, ProtocolConfig, recv_fn); + switch (stage) { + .header => { + const start = provision.recv_buffer.items.len -| 4; + try provision.recv_buffer.appendSlice(buffer); + const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); + + // Basically, this means we haven't finished processing the header. + if (header_ends == null) { + log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); + return .recv; + } + + log.debug("{d} - parsing header", .{provision.index}); + // The +4 is to account for the slice we match. + const header_end: u32 = @intCast(header_ends.? + 4); + provision.request.parse_headers(provision.recv_buffer.items[0..header_end], .{ + .size_request_max = config.size_request_max, + .size_request_uri_max = config.size_request_uri_max, + }) catch |e| { + switch (e) { + HTTPError.ContentTooLarge => { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + }, + HTTPError.TooManyHeaders => { + provision.response.set(.{ + .status = .@"Request Header Fields Too Large", + .mime = Mime.HTML, + .body = "Too Many Headers", + }); + }, + HTTPError.MalformedRequest => { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Malformed Request", + }); + }, + HTTPError.URITooLong => { + provision.response.set(.{ + .status = .@"URI Too Long", + .mime = Mime.HTML, + .body = "URI Too Long", + }); + }, + HTTPError.InvalidMethod => { + provision.response.set(.{ + .status = .@"Not Implemented", + .mime = Mime.HTML, + .body = "Not Implemented", + }); + }, + HTTPError.HTTPVersionNotSupported => { + provision.response.set(.{ + .status = .@"HTTP Version Not Supported", + .mime = Mime.HTML, + .body = "HTTP Version Not Supported", + }); + }, + } + + return raw_respond(provision) catch unreachable; + }; + + // Logging information about Request. + log.info("{d} - \"{s} {s}\" {s}", .{ + provision.index, + @tagName(provision.request.method), + provision.request.uri, + provision.request.headers.get("User-Agent") orelse "N/A", + }); + + // HTTP/1.1 REQUIRES a Host header to be present. 
+ const is_http_1_1 = provision.request.version == .@"HTTP/1.1"; + const is_host_present = provision.request.headers.get("Host") != null; + if (is_http_1_1 and !is_host_present) { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Missing \"Host\" Header", + }); + + return try raw_respond(provision); + } + + if (!provision.request.expect_body()) { + return try route_and_respond(rt, provision, router); + } + + // Everything after here is a Request that is expecting a body. + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + break :blk 0; + }; + + break :blk try std.fmt.parseInt(u32, length_string, 10); + }; + + if (header_end < provision.recv_buffer.items.len) { + const difference = provision.recv_buffer.items.len - header_end; + if (difference == content_length) { + // Whole Body + log.debug("{d} - got whole body with header", .{provision.index}); + const body_end = header_end + difference; + provision.request.set(.{ + .body = provision.recv_buffer.items[header_end..body_end], + }); + return try route_and_respond(rt, provision, router); + } else { + // Partial Body + log.debug("{d} - got partial body with header", .{provision.index}); + stage = .{ .body = header_end }; + return .recv; + } + } else if (header_end == provision.recv_buffer.items.len) { + // Body of length 0 probably or only got header. + if (content_length == 0) { + log.debug("{d} - got body of length 0", .{provision.index}); + // Body of Length 0. + provision.request.set(.{ .body = "" }); + return try route_and_respond(rt, provision, router); + } else { + // Got only header. + log.debug("{d} - got all header aka no body", .{provision.index}); + stage = .{ .body = header_end }; + return .recv; + } + } else unreachable; + }, + + .body => |header_end| { + // We should ONLY be here if we expect there to be a body. + assert(provision.request.expect_body()); + log.debug("{d} - body matching", .{provision.index}); + + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + provision.response.set(.{ + .status = .@"Length Required", + .mime = Mime.HTML, + .body = "", + }); + + return try raw_respond(provision); + }; + + break :blk try std.fmt.parseInt(u32, length_string, 10); + }; + + const request_length = header_end + content_length; + + // If this body will be too long, abort early. 
+ if (request_length > config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "", + }); + return try raw_respond(provision); + } + + if (job.count >= request_length) { + provision.request.set(.{ + .body = provision.recv_buffer.items[header_end..request_length], + }); + return try route_and_respond(rt, provision, router); + } else { + return .recv; + } + }, + } + } + }; } diff --git a/src/http/sse.zig b/src/http/sse.zig new file mode 100644 index 0000000..2b5ee66 --- /dev/null +++ b/src/http/sse.zig @@ -0,0 +1,78 @@ +const std = @import("std"); + +const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice; + +const Provision = @import("provision.zig").Provision; +const _Context = @import("context.zig").Context; + +const TaskFn = @import("tardy").TaskFn; +const Runtime = @import("tardy").Runtime; + +const SSEMessage = struct { + id: ?[]const u8 = null, + event: ?[]const u8 = null, + data: ?[]const u8 = null, + retry: ?u64 = null, +}; + +pub fn SSE(comptime Server: type) type { + const Context = _Context(Server); + return struct { + const Self = @This(); + context: *Context, + allocator: std.mem.Allocator, + runtime: *Runtime, + + pub fn send( + self: *Self, + options: SSEMessage, + then_context: anytype, + then: TaskFn(bool, @TypeOf(then_context)), + ) !void { + var index: usize = 0; + const buffer = self.context.provision.buffer; + + if (options.id) |id| { + const buf = try std.fmt.bufPrint(buffer[index..], "id: {s}\n", .{id}); + index += buf.len; + } + + if (options.event) |event| { + const buf = try std.fmt.bufPrint(buffer[index..], "event: {s}\n", .{event}); + index += buf.len; + } + + if (options.data) |data| { + const buf = try std.fmt.bufPrint(buffer[index..], "data: {s}\n", .{data}); + index += buf.len; + } + + if (options.retry) |retry| { + const buf = try std.fmt.bufPrint(buffer[index..], "retry: {d}\n", .{retry}); + index += buf.len; + } + + buffer[index] = '\n'; + index += 1; + + const pslice = Pseudoslice.init(buffer[0..index], "", buffer); + + const first_chunk = Server.prepare_send( + self.context.runtime, + self.context.provision, + .{ .sse = .{ + .func = then, + .ctx = then_context, + } }, + pslice, + ) catch unreachable; + + self.context.runtime.net.send( + self.context.provision, + Server.send_then_sse_task, + self.context.provision.socket, + first_chunk, + ) catch unreachable; + } + }; +}