From 919164f5786dda28933840901d62bf0630b2c021 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Fri, 18 Oct 2024 02:42:08 -0700 Subject: [PATCH 01/17] http/server: handlers in async context --- examples/http/basic/main.zig | 4 +- examples/http/benchmark/main.zig | 10 ++-- examples/http/minram/main.zig | 8 ++-- examples/http/multithread/main.zig | 39 +++++++-------- examples/http/tls/main.zig | 14 ++++-- examples/http/valgrind/main.zig | 12 +++-- src/core/server.zig | 77 +++++++++++++++++++++++++++++- src/http/context.zig | 62 +++++++++++++++++++++++- src/http/response.zig | 2 +- src/http/route.zig | 2 +- src/http/router.zig | 33 +++++++------ src/http/server.zig | 30 +++++++----- 12 files changed, 221 insertions(+), 72 deletions(-) diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index bc9f337..088b134 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -15,7 +15,7 @@ pub fn main() !void { defer router.deinit(); try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + pub fn handler_fn(ctx: *http.Context) void { const body = \\ \\ @@ -25,7 +25,7 @@ pub fn main() !void { \\ ; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index b17e53d..cba7b61 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -7,10 +7,10 @@ pub const std_options = .{ .log_level = .err, }; -fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) void { - const name = context.captures[0].string; +fn hi_handler(ctx: *http.Context) void { + const name = ctx.captures[0].string; - const body = std.fmt.allocPrint(context.allocator, + const body = std.fmt.allocPrint(ctx.allocator, \\ \\ \\ @@ -28,7 +28,7 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) \\ \\ , .{name}) catch { - response.set(.{ + ctx.respond(.{ .status = .@"Internal Server Error", .mime = http.Mime.HTML, .body = "Out of Memory!", @@ -36,7 +36,7 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) return; }; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body, diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig index c81a0fb..df23810 100644 --- a/examples/http/minram/main.zig +++ b/examples/http/minram/main.zig @@ -7,7 +7,9 @@ pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - var gpa = std.heap.GeneralPurposeAllocator(.{ .enable_memory_limit = true }){ .requested_memory_limit = 1024 * 300 }; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .enable_memory_limit = true }, + ){ .requested_memory_limit = 1024 * 300 }; const allocator = gpa.allocator(); defer _ = gpa.deinit(); @@ -15,7 +17,7 @@ pub fn main() !void { defer router.deinit(); try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + pub fn handler_fn(ctx: *http.Context) void { const body = \\ \\ @@ -25,7 +27,7 @@ pub fn main() !void { \\ ; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig index d434b67..7ba9245 100644 --- a/examples/http/multithread/main.zig +++ b/examples/http/multithread/main.zig @@ -3,11 +3,11 
@@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/multithread"); -fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) void { - const name = context.captures[0].string; - const greeting = context.queries.get("greeting") orelse "Hi"; +fn hi_handler(ctx: *http.Context) void { + const name = ctx.captures[0].string; + const greeting = ctx.queries.get("greeting") orelse "Hi"; - const body = std.fmt.allocPrint(context.allocator, + const body = std.fmt.allocPrint(ctx.allocator, \\ \\ \\ @@ -25,7 +25,7 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) \\ \\ , .{ greeting, name }) catch { - response.set(.{ + ctx.respond(.{ .status = .@"Internal Server Error", .mime = http.Mime.HTML, .body = "Out of Memory!", @@ -33,35 +33,26 @@ fn hi_handler(_: http.Request, response: *http.Response, context: http.Context) return; }; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body, }); } -fn redir_handler(_: http.Request, response: *http.Response, context: http.Context) void { - _ = context; - response.set(.{ +fn redir_handler(ctx: *http.Context) void { + ctx.response.headers.add("Location", "/hi/redirect") catch unreachable; + ctx.respond(.{ .status = .@"Permanent Redirect", .mime = http.Mime.HTML, .body = "", }); - - response.headers.add("Location", "/hi/redirect") catch { - response.set(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Redirect Handler Failed", - }); - return; - }; } -fn post_handler(request: http.Request, response: *http.Response, _: http.Context) void { - log.debug("Body: {s}", .{request.body}); +fn post_handler(ctx: *http.Context) void { + log.debug("Body: {s}", .{ctx.request.body}); - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = "", @@ -73,7 +64,11 @@ pub fn main() !void { const port: u16 = 9862; // if multithreaded, you need a thread-safe allocator. 
- const allocator = std.heap.page_allocator; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){}; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); var router = http.Router.init(allocator); defer router.deinit(); diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig index c923d07..738fbf1 100644 --- a/examples/http/tls/main.zig +++ b/examples/http/tls/main.zig @@ -6,7 +6,11 @@ pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - const allocator = std.heap.c_allocator; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); var router = http.Router.init(allocator); defer router.deinit(); @@ -14,7 +18,7 @@ pub fn main() !void { try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + pub fn handler_fn(ctx: *http.Context) void { const body = \\ \\ @@ -27,7 +31,7 @@ pub fn main() !void { \\ ; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -36,8 +40,8 @@ pub fn main() !void { }.handler_fn)); try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ + pub fn handler_fn(ctx: *http.Context) void { + ctx.respond(.{ .status = .Kill, .mime = http.Mime.HTML, .body = "", diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig index 83d3dc2..89d313c 100644 --- a/examples/http/valgrind/main.zig +++ b/examples/http/valgrind/main.zig @@ -7,13 +7,15 @@ pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - const allocator = std.heap.c_allocator; + var gpa = std.heap.GeneralPurposeAllocator(.{}){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); var router = http.Router.init(allocator); defer router.deinit(); try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + pub fn handler_fn(ctx: *http.Context) void { const body = \\ \\ @@ -23,7 +25,7 @@ pub fn main() !void { \\ ; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -32,8 +34,8 @@ pub fn main() !void { }.handler_fn)); try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ + pub fn handler_fn(ctx: *http.Context) void { + ctx.respond(.{ .status = .Kill, .mime = http.Mime.HTML, .body = "", diff --git a/src/core/server.zig b/src/core/server.zig index f8abe54..585147f 100644 --- a/src/core/server.zig +++ b/src/core/server.zig @@ -14,6 +14,7 @@ const Pool = @import("tardy").Pool; pub const Threading = @import("tardy").TardyThreading; pub const Runtime = @import("tardy").Runtime; pub const Task = @import("tardy").Task; +const TaskFn = @import("tardy").TaskFn; pub const AsyncIOType = @import("tardy").AsyncIOType; const TardyCreator = @import("tardy").Tardy; const Cross = @import("tardy").Cross; @@ -22,9 +23,10 @@ pub const RecvStatus = union(enum) { kill, recv, send: Pseudoslice, + spawned, }; -/// Security Model to use. 
+/// Security Model to use.chinp acas /// /// Default: .plain (plaintext) pub const Security = union(enum) { @@ -94,6 +96,7 @@ pub const zzzConfig = struct { fn RecvFn(comptime ProtocolData: type, comptime ProtocolConfig: type) type { return *const fn ( rt: *Runtime, + trigger_task: TaskFn, provision: *ZProvision(ProtocolData), p_config: *const ProtocolConfig, z_config: *const zzzConfig, @@ -340,6 +343,75 @@ pub fn Server( } } + /// This is the task you MUST trigger if the `recv_fn` returns `.spawned`. + fn trigger_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + + switch (provision.job) { + else => unreachable, + .recv => { + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + }, + .send => |*send_job| { + const z_config: *const zzzConfig = @ptrCast(@alignCast(rt.storage.get("z_config").?)); + const plain_buffer = send_job.slice.get(0, z_config.size_socket_buffer); + + switch (comptime security) { + .tls => |_| { + const tls_slice: []TLSType = @as( + [*]TLSType, + @ptrCast(@alignCast(rt.storage.get("tls_slice").?)), + )[0..z_config.size_connections_max]; + + const tls_ptr: *?TLS = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .fd = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSEncryptFailed; + }; + + send_job.count = plain_buffer.len; + send_job.security = .{ + .tls = .{ + .encrypted = encrypted_buffer, + .encrypted_count = 0, + }, + }; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = encrypted_buffer, + .func = send_task, + .ctx = provision, + }); + }, + .plain => { + send_job.security = .plain; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = plain_buffer, + .func = send_task, + .ctx = provision, + }); + }, + } + }, + } + } + fn recv_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { const provision: *Provision = @ptrCast(@alignCast(ctx.?)); assert(provision.job == .recv); @@ -390,9 +462,10 @@ pub fn Server( } }; - var status: RecvStatus = recv_fn(rt, provision, p_config, z_config, recv_buffer); + var status: RecvStatus = @call(.auto, recv_fn, .{ rt, trigger_task, provision, p_config, z_config, recv_buffer }); switch (status) { + .spawned => return, .kill => { rt.stop(); return error.Killed; diff --git a/src/http/context.zig b/src/http/context.zig index 52bae26..d4e5fe0 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -1,21 +1,81 @@ const std = @import("std"); +const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/context"); const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; +const Provision = @import("../core/zprovision.zig").ZProvision(@import("protocol.zig").ProtocolData); + +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const ResponseSetOptions = Response.ResponseSetOptions; + +const Runtime = @import("tardy").Runtime; +const Task = @import("tardy").Task; +// Needed here to prevent a dependency loop. 
+const TaskFn = *const fn (*Runtime, *const Task, ?*anyopaque) anyerror!void; + +const raw_respond = @import("server.zig").raw_respond; + pub const Context = struct { allocator: std.mem.Allocator, + trigger: TaskFn, + runtime: *Runtime, + /// The Request that triggered this handler. + request: *const Request, + /// The Response that will be returned. + /// To actually trigger the send, use `Context.respond`. + response: *Response, path: []const u8, captures: []Capture, queries: *QueryMap, + provision: *Provision, + triggered: bool = false, - pub fn init(allocator: std.mem.Allocator, path: []const u8, captures: []Capture, queries: *QueryMap) Context { + pub fn init( + allocator: std.mem.Allocator, + trigger: TaskFn, + runtime: *Runtime, + ctx: *Provision, + request: *const Request, + response: *Response, + path: []const u8, + captures: []Capture, + queries: *QueryMap, + ) Context { return Context{ .allocator = allocator, + .trigger = trigger, + .runtime = runtime, + .provision = ctx, + .request = request, + .response = response, .path = path, .captures = captures, .queries = queries, }; } + + pub fn respond(self: *Context, options: ResponseSetOptions) void { + assert(!self.triggered); + self.triggered = true; + self.response.set(options); + + // this will write the data into the appropriate places. + const status = raw_respond(self.provision) catch unreachable; + + self.provision.job = .{ + .send = .{ + .count = 0, + .slice = status.send, + .security = undefined, + }, + }; + + self.runtime.spawn(.{ + .func = self.trigger, + .ctx = self.provision, + }) catch unreachable; + } }; diff --git a/src/http/response.zig b/src/http/response.zig index e5e14e9..b08c96e 100644 --- a/src/http/response.zig +++ b/src/http/response.zig @@ -59,7 +59,7 @@ pub const Response = struct { self.body = null; } - const ResponseSetOptions = struct { + pub const ResponseSetOptions = struct { status: ?Status = null, mime: ?Mime = null, body: ?[]const u8 = null, diff --git a/src/http/route.zig b/src/http/route.zig index 7500848..2b63d6a 100644 --- a/src/http/route.zig +++ b/src/http/route.zig @@ -6,7 +6,7 @@ const Response = @import("response.zig").Response; const Context = @import("context.zig").Context; -pub const RouteHandlerFn = *const fn (request: Request, response: *Response, context: Context) void; +pub const RouteHandlerFn = *const fn (context: *Context) void; pub const Route = struct { handlers: [9]?RouteHandlerFn = [_]?RouteHandlerFn{null} ** 9, diff --git a/src/http/router.zig b/src/http/router.zig index 515f465..89f5ca6 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -98,24 +98,18 @@ pub const Router = struct { ) !void { assert(!self.locked); const route = Route.init().get(struct { - pub fn handler_fn(request: Request, response: *Response, _: Context) void { - response.set(.{ - .status = .OK, - .mime = mime, - .body = bytes, - }); - + pub fn handler_fn(ctx: *Context) void { if (comptime builtin.mode == .Debug) { // Don't Cache in Debug. - response.headers.add( + ctx.response.headers.add( "Cache-Control", "no-cache", ) catch unreachable; } else { // Cache for 30 days. 
- response.headers.add( + ctx.response.headers.add( "Cache-Control", - comptime std.fmt.comptimePrint("max-age={d}", .{60 * 60 * 24 * 30}), + comptime std.fmt.comptimePrint("max-age={d}", .{std.time.s_per_day * 30}), ) catch unreachable; } @@ -124,15 +118,26 @@ pub const Router = struct { if (comptime bytes.len > 1024) { @setEvalBranchQuota(1_000_000); const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); - response.headers.add("ETag", etag[0..]) catch unreachable; + ctx.response.headers.add("ETag", etag[0..]) catch unreachable; - if (request.headers.get("If-None-Match")) |match| { + if (ctx.request.headers.get("If-None-Match")) |match| { if (std.mem.eql(u8, etag, match)) { - response.set_status(.@"Not Modified"); - response.set_body(""); + ctx.respond(.{ + .status = .@"Not Modified", + .mime = Mime.HTML, + .body = "", + }); + + return; } } } + + ctx.respond(.{ + .status = .OK, + .mime = mime, + .body = bytes, + }); } }.handler_fn); diff --git a/src/http/server.zig b/src/http/server.zig index 61a78d0..f8dd9dd 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -29,11 +29,13 @@ const Provision = @import("../core/zprovision.zig").ZProvision(ProtocolData); const RecvStatus = @import("../core/server.zig").RecvStatus; const zzzServer = @import("../core/server.zig").Server; +const TaskFn = @import("tardy").TaskFn; + /// Uses the current p.response to generate and queue up the sending /// of a response. This is used when we already know what we want to send. /// /// See: `route_and_respond` -fn raw_respond(p: *Provision) !RecvStatus { +pub inline fn raw_respond(p: *Provision) !RecvStatus { { const status_code: u16 = if (p.data.response.status) |status| @intFromEnum(status) else 0; const status_name = if (p.data.response.status) |status| @tagName(status) else "No Status"; @@ -47,22 +49,28 @@ fn raw_respond(p: *Provision) !RecvStatus { return .{ .send = pseudo }; } -fn route_and_respond(p: *Provision, router: *const Router) !RecvStatus { +fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: *const Router) !RecvStatus { route: { const found = router.get_route_from_host(p.data.request.uri, p.data.captures, &p.data.queries); if (found) |f| { const handler = f.route.get_handler(p.data.request.method); if (handler) |func| { - const context: Context = Context.init( + const context: *Context = try p.arena.allocator().create(Context); + context.* = Context.init( p.arena.allocator(), + trigger, + runtime, + p, + &p.data.request, + &p.data.response, p.data.request.uri, f.captures, f.queries, ); - @call(.auto, func, .{ p.data.request, &p.data.response, context }); - break :route; + @call(.auto, func, .{context}); + return .spawned; } else { // If we match the route but not the method. p.data.response.set(.{ @@ -114,13 +122,13 @@ fn route_and_respond(p: *Provision, router: *const Router) !RecvStatus { } pub fn recv_fn( - rt: *Runtime, + runtime: *Runtime, + trigger: TaskFn, provision: *Provision, p_config: *const ProtocolConfig, z_config: *const zzzConfig, recv_buffer: []const u8, ) RecvStatus { - _ = rt; _ = z_config; var stage = provision.data.stage; @@ -222,7 +230,7 @@ pub fn recv_fn( } if (!provision.data.request.expect_body()) { - return route_and_respond(provision, p_config.router) catch unreachable; + return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; } // Everything after here is a Request that is expecting a body. 
@@ -249,7 +257,7 @@ pub fn recv_fn( log.debug("{d} - got whole body with header", .{provision.index}); const body_end = header_end + difference; provision.data.request.set_body(provision.recv_buffer.items[header_end..body_end]); - return route_and_respond(provision, p_config.router) catch unreachable; + return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; } else { // Partial Body log.debug("{d} - got partial body with header", .{provision.index}); @@ -262,7 +270,7 @@ pub fn recv_fn( log.debug("{d} - got body of length 0", .{provision.index}); // Body of Length 0. provision.data.request.set_body(""); - return route_and_respond(provision, p_config.router) catch unreachable; + return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; } else { // Got only header. log.debug("{d} - got all header aka no body", .{provision.index}); @@ -313,7 +321,7 @@ pub fn recv_fn( if (job.count >= request_length) { provision.data.request.set_body(provision.recv_buffer.items[header_end..request_length]); - return route_and_respond(provision, p_config.router) catch unreachable; + return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; } else { return .recv; } From 6c900eb48fb14a89b96811c1a61a32bcd36fefb5 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Fri, 18 Oct 2024 03:38:49 -0700 Subject: [PATCH 02/17] http/router: asynchronous fs serving --- examples/http/fs/main.zig | 28 ++++++- src/core/server.zig | 9 ++- src/http/router.zig | 151 ++++++++++++++++++++++++++++++++------ 3 files changed, 162 insertions(+), 26 deletions(-) diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig index 5380ff4..13bf4f3 100644 --- a/examples/http/fs/main.zig +++ b/examples/http/fs/main.zig @@ -7,13 +7,15 @@ pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - const allocator = std.heap.page_allocator; + var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){ .backing_allocator = std.heap.c_allocator }; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); var router = http.Router.init(allocator); defer router.deinit(); try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + pub fn handler_fn(ctx: *http.Context) void { const body = \\ \\ @@ -23,7 +25,7 @@ pub fn main() !void { \\ ; - response.set(.{ + ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -31,9 +33,27 @@ pub fn main() !void { } }.handler_fn)); + try router.serve_route("/kill", http.Route.init().get(struct { + pub fn handler_fn(ctx: *http.Context) void { + ctx.runtime.stop(); + + ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = "", + }); + } + }.handler_fn)); + try router.serve_fs_dir("/static", "./examples/http/fs/static"); - var server = http.Server(.plain, .auto).init(.{ .allocator = allocator }); + var server = http.Server(.plain, .auto).init(.{ + .allocator = allocator, + .threading = .auto, + .size_connections_max = 256, + }); + defer server.deinit(); + try server.bind(host, port); try server.listen(.{ .router = &router }); } diff --git a/src/core/server.zig b/src/core/server.zig index 585147f..f40bd02 100644 --- a/src/core/server.zig +++ b/src/core/server.zig @@ -239,7 +239,14 @@ pub fn Server( provision.job = .empty; _ = provision.arena.reset(.{ .retain_with_limit = z_config.size_connection_arena_retain }); provision.data.clean(); - 
provision.recv_buffer.clearRetainingCapacity(); + + // TODO: new z_config setting here! + if (provision.recv_buffer.items.len > 1024) { + provision.recv_buffer.shrinkRetainingCapacity(1024); + } else { + provision.recv_buffer.clearRetainingCapacity(); + } + pool.release(provision.index); const accept_queued = rt.storage.get_ptr("accept_queued", bool); diff --git a/src/http/router.zig b/src/http/router.zig index 89f5ca6..4824781 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -13,6 +13,9 @@ const Context = @import("context.zig").Context; const RoutingTrie = @import("routing_trie.zig").RoutingTrie; const QueryMap = @import("routing_trie.zig").QueryMap; +const Runtime = @import("tardy").Runtime; +const Task = @import("tardy").Task; + pub const Router = struct { allocator: std.mem.Allocator, routes: RoutingTrie, @@ -29,16 +32,111 @@ pub const Router = struct { self.routes.deinit(); } + const FileProvision = struct { + mime: Mime, + context: *Context, + fd: std.posix.fd_t, + offset: usize, + list: std.ArrayList(u8), + buffer: []u8, + }; + + fn open_file_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { + const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); + errdefer { + provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + } + + const fd = t.result.?.fd; + if (fd <= -1) { + provision.context.respond(.{ + .status = .@"Not Found", + .mime = Mime.HTML, + .body = "File Not Found", + }); + return; + } + provision.fd = fd; + + try rt.fs.read(.{ + .fd = fd, + .buffer = provision.buffer, + .offset = 0, + .func = read_file_task, + .ctx = provision, + }); + } + + fn read_file_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { + const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); + errdefer { + provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + } + + const result: i32 = t.result.?.value; + if (result <= 0) { + // If we are done reading... + try rt.fs.close(.{ + .fd = provision.fd, + .func = close_file_task, + .ctx = provision, + }); + return; + } + + const length: usize = @intCast(result); + + try provision.list.appendSlice(provision.buffer[0..length]); + + // TODO: This needs to be a setting you pass in to the router. 
+ // + //if (provision.list.items.len > 1024 * 1024 * 4) { + // provision.context.respond(.{ + // .status = .@"Content Too Large", + // .mime = Mime.HTML, + // .body = "File Too Large", + // }); + // return; + //} + + provision.offset += length; + + try rt.fs.read(.{ + .fd = provision.fd, + .buffer = provision.buffer, + .offset = provision.offset, + .func = read_file_task, + .ctx = provision, + }); + } + + fn close_file_task(_: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { + const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); + + provision.context.respond(.{ + .status = .OK, + .mime = provision.mime, + .body = provision.list.items[0..], + }); + } + pub fn serve_fs_dir(self: *Router, comptime url_path: []const u8, comptime dir_path: []const u8) !void { assert(!self.locked); const route = Route.init().get(struct { - pub fn handler_fn(request: Request, response: *Response, context: Context) void { - _ = request; + pub fn handler_fn(ctx: *Context) void { + const search_path = ctx.captures[0].remaining; - const search_path = context.captures[0].remaining; - const file_path = std.fmt.allocPrint(context.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { - response.set(.{ + const file_path = std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { + ctx.respond(.{ .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", @@ -46,39 +144,50 @@ pub const Router = struct { return; }; + // TODO: Ensure that paths cannot go out of scope and reference data that they shouldn't be allowed to. + // Very important. + const extension_start = std.mem.lastIndexOfScalar(u8, search_path, '.'); const mime: Mime = blk: { if (extension_start) |start| { break :blk Mime.from_extension(search_path[start..]); } else { - break :blk Mime.HTML; + break :blk Mime.BIN; } }; - const file: std.fs.File = std.fs.cwd().openFile(file_path, .{}) catch { - response.set(.{ - .status = .@"Not Found", + const provision = ctx.allocator.create(FileProvision) catch { + ctx.respond(.{ + .status = .@"Internal Server Error", .mime = Mime.HTML, - .body = "File Not Found", + .body = "", }); return; }; - defer file.close(); - const file_bytes = file.readToEndAlloc(context.allocator, 1024 * 1024 * 4) catch { - response.set(.{ - .status = .@"Content Too Large", + provision.* = .{ + .mime = mime, + .context = ctx, + .fd = -1, + .offset = 0, + .list = std.ArrayList(u8).init(ctx.allocator), + .buffer = ctx.provision.buffer, + }; + + // We also need to support chunked encoding. + // It makes a lot more sense for files atleast. 
+ ctx.runtime.fs.open(.{ + .path = file_path, + .func = open_file_task, + .ctx = provision, + }) catch { + ctx.respond(.{ + .status = .@"Internal Server Error", .mime = Mime.HTML, - .body = "File Too Large", + .body = "", }); return; }; - - response.set(.{ - .status = .OK, - .mime = mime, - .body = file_bytes, - }); } }.handler_fn); From 4b624c29d92642e6a8abee6011a23e6b969c0d34 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Mon, 21 Oct 2024 16:16:13 -0700 Subject: [PATCH 03/17] testing supporting only http[s] --- examples/http/basic/main.zig | 3 +- src/core/lib.zig | 1 - src/core/server.zig | 926 ------------------------ src/core/zprovision.zig | 41 -- src/http/context.zig | 2 +- src/http/lib.zig | 1 - src/http/protocol.zig | 78 -- src/http/provision.zig | 66 ++ src/http/server.zig | 1296 ++++++++++++++++++++++++++++------ 9 files changed, 1152 insertions(+), 1262 deletions(-) delete mode 100644 src/core/server.zig delete mode 100644 src/core/zprovision.zig delete mode 100644 src/http/protocol.zig create mode 100644 src/http/provision.zig diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index 088b134..9181d97 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -34,11 +34,12 @@ pub fn main() !void { }.handler_fn)); var server = http.Server(.plain, .auto).init(.{ + .router = &router, .allocator = allocator, .threading = .single, }); defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/src/core/lib.zig b/src/core/lib.zig index e091ee8..f21f77a 100644 --- a/src/core/lib.zig +++ b/src/core/lib.zig @@ -1,3 +1,2 @@ pub const Job = @import("job.zig").Job; pub const Pseudoslice = @import("pseudoslice.zig").Pseudoslice; -pub const Server = @import("server.zig").Server; diff --git a/src/core/server.zig b/src/core/server.zig deleted file mode 100644 index f40bd02..0000000 --- a/src/core/server.zig +++ /dev/null @@ -1,926 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const assert = std.debug.assert; -const log = std.log.scoped(.@"zzz/server"); - -const Pseudoslice = @import("pseudoslice.zig").Pseudoslice; -const ZProvision = @import("zprovision.zig").ZProvision; - -const TLSFileOptions = @import("../tls/lib.zig").TLSFileOptions; -const TLSContext = @import("../tls/lib.zig").TLSContext; -const TLS = @import("../tls/lib.zig").TLS; - -const Pool = @import("tardy").Pool; -pub const Threading = @import("tardy").TardyThreading; -pub const Runtime = @import("tardy").Runtime; -pub const Task = @import("tardy").Task; -const TaskFn = @import("tardy").TaskFn; -pub const AsyncIOType = @import("tardy").AsyncIOType; -const TardyCreator = @import("tardy").Tardy; -const Cross = @import("tardy").Cross; - -pub const RecvStatus = union(enum) { - kill, - recv, - send: Pseudoslice, - spawned, -}; - -/// Security Model to use.chinp acas -/// -/// Default: .plain (plaintext) -pub const Security = union(enum) { - plain, - tls: struct { - cert: TLSFileOptions, - key: TLSFileOptions, - cert_name: []const u8 = "CERTIFICATE", - key_name: []const u8 = "PRIVATE KEY", - }, -}; - -/// These are various general configuration -/// options that are important for the actual framework. -/// -/// This includes various different options and limits -/// for interacting with the underlying network. -pub const zzzConfig = struct { - /// The allocator that server will use. - allocator: std.mem.Allocator, - /// Threading Model to use. 
- /// - /// Default: .auto - threading: Threading = .auto, - /// Kernel Backlog Value. - size_backlog: u31 = 512, - /// Number of Maximum Concurrent Connections. - /// - /// This is applied PER thread if using multi-threading. - /// zzz will drop/close any connections greater - /// than this. - /// - /// You want to tune this to your expected number - /// of maximum connections. - /// - /// Default: 1024 - size_connections_max: u16 = 1024, - /// Maximum number of completions we can reap - /// with a single call of reap(). - /// - /// Default: 256 - size_completions_reap_max: u16 = 256, - /// Amount of allocated memory retained - /// after an arena is cleared. - /// - /// A higher value will increase memory usage but - /// should make allocators faster.Tardy - /// - /// A lower value will reduce memory usage but - /// will make allocators slower. - /// - /// Default: 1KB - size_connection_arena_retain: u32 = 1024, - /// Size of the buffer (in bytes) used for - /// interacting with the socket. - /// - /// Default: 4 KB. - size_socket_buffer: u32 = 1024 * 4, - /// Maximum size (in bytes) of the Recv buffer. - /// This is mainly a concern when you are reading in - /// large requests before responding. - /// - /// Default: 2MB. - size_recv_buffer_max: u32 = 1024 * 1024 * 2, -}; - -fn RecvFn(comptime ProtocolData: type, comptime ProtocolConfig: type) type { - return *const fn ( - rt: *Runtime, - trigger_task: TaskFn, - provision: *ZProvision(ProtocolData), - p_config: *const ProtocolConfig, - z_config: *const zzzConfig, - recv_buffer: []const u8, - ) RecvStatus; -} - -pub fn Server( - comptime security: Security, - comptime async_type: AsyncIOType, - comptime ProtocolData: type, - comptime ProtocolConfig: type, - comptime recv_fn: RecvFn(ProtocolData, ProtocolConfig), -) type { - const TLSContextType = comptime if (security == .tls) TLSContext else void; - const TLSType = comptime if (security == .tls) ?TLS else void; - const Provision = ZProvision(ProtocolData); - const Tardy = TardyCreator(async_type); - - return struct { - const Self = @This(); - allocator: std.mem.Allocator, - tardy: Tardy, - config: zzzConfig, - addr: std.net.Address, - tls_ctx: TLSContextType, - - pub fn init(config: zzzConfig) Self { - const tls_ctx = switch (comptime security) { - .tls => |inner| TLSContext.init(.{ - .allocator = config.allocator, - .cert = inner.cert, - .cert_name = inner.cert_name, - .key = inner.key, - .key_name = inner.key_name, - .size_tls_buffer_max = config.size_socket_buffer * 2, - }) catch unreachable, - .plain => void{}, - }; - - return Self{ - .allocator = config.allocator, - .tardy = Tardy.init(.{ - .allocator = config.allocator, - .threading = config.threading, - .size_tasks_max = config.size_connections_max, - .size_aio_jobs_max = config.size_connections_max, - .size_aio_reap_max = config.size_completions_reap_max, - }) catch unreachable, - .config = config, - .addr = undefined, - .tls_ctx = tls_ctx, - }; - } - - pub fn deinit(self: *Self) void { - if (comptime security == .tls) { - self.tls_ctx.deinit(); - } - - self.tardy.deinit(); - } - - fn create_socket(self: *const Self) !std.posix.socket_t { - const socket: std.posix.socket_t = blk: { - const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; - break :blk try std.posix.socket( - self.addr.any.family, - socket_flags, - std.posix.IPPROTO.TCP, - ); - }; - - log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); - - if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { - try 
std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT_LB, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEADDR, - &std.mem.toBytes(@as(c_int, 1)), - ); - } - - try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); - return socket; - } - - /// If you are using a custom implementation that does NOT rely - /// on TCP/IP, you can SKIP calling this method and just set the - /// socket value yourself. - /// - /// This is only allowed on certain targets that do not have TCP/IP - /// support. - pub fn bind(self: *Self, host: []const u8, port: u16) !void { - assert(host.len > 0); - assert(port > 0); - - self.addr = blk: { - switch (comptime builtin.os.tag) { - .windows => break :blk try std.net.Address.parseIp(host, port), - else => break :blk try std.net.Address.resolveIp(host, port), - } - }; - } - - fn close_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .close); - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - log.info("{d} - closing connection", .{provision.index}); - - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - tls_ptr.*.?.deinit(); - tls_ptr.* = null; - } - - provision.socket = Cross.socket.INVALID_SOCKET; - provision.job = .empty; - _ = provision.arena.reset(.{ .retain_with_limit = z_config.size_connection_arena_retain }); - provision.data.clean(); - - // TODO: new z_config setting here! - if (provision.recv_buffer.items.len > 1024) { - provision.recv_buffer.shrinkRetainingCapacity(1024); - } else { - provision.recv_buffer.clearRetainingCapacity(); - } - - pool.release(provision.index); - - const accept_queued = rt.storage.get_ptr("accept_queued", bool); - if (!accept_queued.*) { - accept_queued.* = true; - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); - } - } - - fn accept_task(rt: *Runtime, t: *const Task, _: ?*anyopaque) !void { - const child_socket = t.result.?.socket; - - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const accept_queued = rt.storage.get_ptr("accept_queued", bool); - accept_queued.* = false; - - if (rt.scheduler.tasks.clean() >= 2) { - accept_queued.* = true; - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); - } - - if (!Cross.socket.is_valid(child_socket)) { - log.err("socket accept failed", .{}); - return error.AcceptFailed; - } - - // This should never fail. It means that we have a dangling item. 
- assert(pool.clean() > 0); - const borrowed = pool.borrow_hint(t.index) catch unreachable; - - log.info("{d} - accepting connection", .{borrowed.index}); - log.debug( - "empty provision slots: {d}", - .{pool.items.len - pool.dirty.count()}, - ); - assert(borrowed.item.job == .empty); - - try Cross.socket.disable_nagle(child_socket); - try Cross.socket.to_nonblock(child_socket); - - const provision = borrowed.item; - - // Store the index of this item. - provision.index = @intCast(borrowed.index); - provision.socket = child_socket; - - switch (comptime security) { - .tls => |_| { - const tls_ctx = rt.storage.get_const_ptr("tls_ctx", TLSContextType); - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* == null); - - tls_ptr.* = tls_ctx.create(child_socket) catch |e| { - log.err("{d} - tls creation failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSCreationFailed; - }; - - const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { - log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSStartHandshakeFailed; - }; - - provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = recv_buf, - .func = handshake_task, - .ctx = borrowed.item, - }); - }, - .plain => { - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = borrowed.item, - }); - }, - } - } - - /// This is the task you MUST trigger if the `recv_fn` returns `.spawned`. 
- fn trigger_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - - switch (provision.job) { - else => unreachable, - .recv => { - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - .send => |*send_job| { - const z_config: *const zzzConfig = @ptrCast(@alignCast(rt.storage.get("z_config").?)); - const plain_buffer = send_job.slice.get(0, z_config.size_socket_buffer); - - switch (comptime security) { - .tls => |_| { - const tls_slice: []TLSType = @as( - [*]TLSType, - @ptrCast(@alignCast(rt.storage.get("tls_slice").?)), - )[0..z_config.size_connections_max]; - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .fd = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSEncryptFailed; - }; - - send_job.count = plain_buffer.len; - send_job.security = .{ - .tls = .{ - .encrypted = encrypted_buffer, - .encrypted_count = 0, - }, - }; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = encrypted_buffer, - .func = send_task, - .ctx = provision, - }); - }, - .plain => { - send_job.security = .plain; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); - }, - } - }, - } - } - - fn recv_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .recv); - const length: i32 = t.result.?.value; - - const p_config = rt.storage.get_const_ptr("p_config", ProtocolConfig); - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - const recv_job = &provision.job.recv; - - // If the socket is closed. 
- if (length <= 0) { - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return; - } - - log.debug("{d} - recv triggered", .{provision.index}); - - const recv_count: usize = @intCast(length); - recv_job.count += recv_count; - const pre_recv_buffer = provision.buffer[0..recv_count]; - - const recv_buffer = blk: { - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { - log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSDecryptFailed; - }; - }, - .plain => break :blk pre_recv_buffer, - } - }; - - var status: RecvStatus = @call(.auto, recv_fn, .{ rt, trigger_task, provision, p_config, z_config, recv_buffer }); - - switch (status) { - .spawned => return, - .kill => { - rt.stop(); - return error.Killed; - }, - .recv => { - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - .send => |*pslice| { - const plain_buffer = pslice.get(0, z_config.size_socket_buffer); - - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSEncryptFailed; - }; - - provision.job = .{ - .send = .{ - .slice = pslice.*, - .count = @intCast(plain_buffer.len), - .security = .{ - .tls = .{ - .encrypted = encrypted_buffer, - .encrypted_count = 0, - }, - }, - }, - }; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = encrypted_buffer, - .func = send_task, - .ctx = provision, - }); - }, - .plain => { - provision.job = .{ - .send = .{ - .slice = pslice.*, - .count = 0, - .security = .plain, - }, - }; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); - }, - } - }, - } - } - - fn handshake_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - log.debug("Handshake Task", .{}); - assert(security == .tls); - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - const length: i32 = t.result.?.value; - - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - assert(provision.job == .handshake); - const handshake_job = &provision.job.handshake; - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - log.debug("processing handshake", .{}); - handshake_job.count += 1; - - if (length <= 0) { - log.debug("handshake connection closed", .{}); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeClosed; - } - - if (handshake_job.count >= 50) { - log.debug("handshake taken too many cycles", .{}); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return 
error.TLSHandshakeTooManyCycles; - } - - const hs_length: usize = @intCast(length); - - switch (handshake_job.state) { - .recv => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .recv = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on recv failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeRecvFailed; - }; - - switch (hstate) { - .recv => |buf| { - log.debug("requeing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("queueing send in handshake", .{}); - handshake_job.state = .send; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } - }, - .send => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .send = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on send failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeSendFailed; - }; - - switch (hstate) { - .recv => |buf| { - handshake_job.state = .recv; - log.debug("queuing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("requeing send in handshake", .{}); - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } - }, - } - } else unreachable; - } - - fn send_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - assert(provision.job == .send); - const length: i32 = t.result.?.value; - - const z_config = rt.storage.get_const_ptr("z_config", zzzConfig); - - // If the socket is closed. - if (length <= 0) { - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return; - } - - const send_job = &provision.job.send; - - log.debug("{d} - send triggered", .{provision.index}); - const send_count: usize = @intCast(length); - log.debug("{d} - send length: {d}", .{ provision.index, send_count }); - - switch (comptime security) { - .tls => { - assert(send_job.security == .tls); - - const tls_slice = rt.storage.get("tls_slice", []TLSType); - - const job_tls = &send_job.security.tls; - job_tls.encrypted_count += send_count; - - if (job_tls.encrypted_count >= job_tls.encrypted.len) { - if (send_job.count >= send_job.slice.len) { - // All done sending. 
- log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = z_config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; - - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - } else { - // Queue a new chunk up for sending. - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); - - const inner_slice = send_job.slice.get( - send_job.count, - send_job.count + z_config.size_socket_buffer, - ); - - send_job.count += @intCast(inner_slice.len); - - const tls_ptr: *?TLS = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSEncryptFailed; - }; - - job_tls.encrypted = encrypted; - job_tls.encrypted_count = 0; - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = job_tls.encrypted, - .func = send_task, - .ctx = provision, - }); - } - } else { - log.debug( - "{d} - sending next encrypted chunk starting at index {d}", - .{ provision.index, job_tls.encrypted_count }, - ); - - const remainder = job_tls.encrypted[job_tls.encrypted_count..]; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = remainder, - .func = send_task, - .ctx = provision, - }); - } - }, - .plain => { - assert(send_job.security == .plain); - send_job.count += send_count; - - if (send_job.count >= send_job.slice.len) { - log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = z_config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; - - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - } else { - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); - - const plain_buffer = send_job.slice.get( - send_job.count, - send_job.count + z_config.size_socket_buffer, - ); - - log.debug("{d} - chunk ends at: {d}", .{ - provision.index, - plain_buffer.len + send_job.count, - }); - - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); - } - }, - } - } - - pub fn listen(self: *Self, protocol_config: ProtocolConfig) !void { - log.info("server listening...", .{}); - log.info("security mode: {s}", .{@tagName(security)}); - - const EntryParams = struct { - zzz: *Self, - p_config: *ProtocolConfig, - }; - - try self.tardy.entry( - struct { - fn rt_start(rt: *Runtime, alloc: std.mem.Allocator, params: EntryParams) !void { - const socket = try params.zzz.create_socket(); - try std.posix.listen(socket, params.zzz.config.size_backlog); - - // use the arena here. 
- var pool_params = params.zzz.config; - pool_params.allocator = alloc; - - const provision_pool = try alloc.create(Pool(Provision)); - provision_pool.* = try Pool(Provision).init( - alloc, - params.zzz.config.size_connections_max, - Provision.init_hook, - pool_params, - ); - - for (provision_pool.items) |*provision| { - provision.data = ProtocolData.init(alloc, params.p_config); - } - - try rt.storage.store_ptr("provision_pool", provision_pool); - try rt.storage.store_ptr("z_config", ¶ms.zzz.config); - try rt.storage.store_ptr("p_config", params.p_config); - - if (comptime security == .tls) { - const tls_slice = try alloc.alloc( - TLSType, - params.zzz.config.size_connections_max, - ); - if (comptime security == .tls) { - for (tls_slice) |*tls| { - tls.* = null; - } - } - - // since slices are fat pointers... - try rt.storage.store_alloc("tls_slice", tls_slice); - try rt.storage.store_ptr("tls_ctx", ¶ms.zzz.tls_ctx); - } - - try rt.storage.store_alloc("server_socket", socket); - try rt.storage.store_alloc("accept_queued", true); - - try rt.net.accept(.{ - .socket = socket, - .func = accept_task, - }); - } - }.rt_start, - EntryParams{ - .zzz = self, - .p_config = @constCast(&protocol_config), - }, - struct { - fn rt_end(rt: *Runtime, alloc: std.mem.Allocator, _: anytype) void { - // clean up socket. - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - std.posix.close(server_socket); - - // clean up provision pool. - const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - for (provision_pool.items) |*provision| { - provision.data.deinit(alloc); - } - provision_pool.deinit(Provision.deinit_hook, alloc); - alloc.destroy(provision_pool); - - // clean up TLS. - if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - alloc.free(tls_slice); - } - } - }.rt_end, - void, - ); - } - }; -} diff --git a/src/core/zprovision.zig b/src/core/zprovision.zig deleted file mode 100644 index 3e2f63e..0000000 --- a/src/core/zprovision.zig +++ /dev/null @@ -1,41 +0,0 @@ -const std = @import("std"); -const panic = std.debug.panic; -const Job = @import("../core/lib.zig").Job; -const TLS = @import("../tls/lib.zig").TLS; - -pub fn ZProvision(comptime ProtocolData: type) type { - return struct { - const Self = @This(); - index: usize, - job: Job, - socket: std.posix.socket_t, - buffer: []u8, - recv_buffer: std.ArrayList(u8), - arena: std.heap.ArenaAllocator, - data: ProtocolData, - - pub fn init_hook(provisions: []Self, ctx: anytype) void { - for (provisions) |*provision| { - provision.job = .empty; - provision.socket = undefined; - provision.data = undefined; - // Create Buffer - provision.buffer = ctx.allocator.alloc(u8, ctx.size_socket_buffer) catch { - panic("attempting to statically allocate more memory than available. 
(Socket Buffer)", .{}); - }; - // Create Recv Buffer - provision.recv_buffer = std.ArrayList(u8).init(ctx.allocator); - // Create the Context Arena - provision.arena = std.heap.ArenaAllocator.init(ctx.allocator); - } - } - - pub fn deinit_hook(provisions: []Self, allocator: anytype) void { - for (provisions) |*provision| { - allocator.free(provision.buffer); - provision.recv_buffer.deinit(); - provision.arena.deinit(); - } - } - }; -} diff --git a/src/http/context.zig b/src/http/context.zig index d4e5fe0..d3356f0 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -5,7 +5,7 @@ const log = std.log.scoped(.@"zzz/http/context"); const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; -const Provision = @import("../core/zprovision.zig").ZProvision(@import("protocol.zig").ProtocolData); +const Provision = @import("provision.zig").Provision; const Request = @import("request.zig").Request; const Response = @import("response.zig").Response; diff --git a/src/http/lib.zig b/src/http/lib.zig index 97e43ac..468cdd1 100644 --- a/src/http/lib.zig +++ b/src/http/lib.zig @@ -9,7 +9,6 @@ pub const Router = @import("router.zig").Router; pub const RouteHandlerFn = @import("route.zig").RouteHandlerFn; pub const Context = @import("context.zig").Context; pub const Headers = @import("headers.zig").Headers; -pub const Protocol = @import("protocol.zig"); pub const Server = @import("server.zig").Server; diff --git a/src/http/protocol.zig b/src/http/protocol.zig deleted file mode 100644 index 23fab20..0000000 --- a/src/http/protocol.zig +++ /dev/null @@ -1,78 +0,0 @@ -const std = @import("std"); -const Job = @import("../core/lib.zig").Job; -const Capture = @import("routing_trie.zig").Capture; -const Query = @import("routing_trie.zig").Query; -const QueryMap = @import("routing_trie.zig").QueryMap; -const Request = @import("request.zig").Request; -const Response = @import("response.zig").Response; -const Stage = @import("stage.zig").Stage; -const Router = @import("router.zig").Router; - -pub const ProtocolConfig = struct { - router: *Router, - num_header_max: u32 = 32, - /// Maximum number of Captures in a Route - /// - /// Default: 8 - num_captures_max: u32 = 8, - /// Maximum number of Queries in a URL - /// - /// Default: 8 - num_queries_max: u32 = 8, - /// Maximum size (in bytes) of the Request. - /// - /// Default: 2MB. - size_request_max: u32 = 1024 * 1024 * 2, - /// Maximum size (in bytes) of the Request URI. - /// - /// Default: 2KB. 
- size_request_uri_max: u32 = 1024 * 2, -}; - -pub const ProtocolData = struct { - captures: []Capture, - queries: QueryMap, - request: Request, - response: Response, - stage: Stage, - - pub fn init(allocator: std.mem.Allocator, config: *const ProtocolConfig) ProtocolData { - var queries = QueryMap.init(allocator); - queries.ensureTotalCapacity(config.num_queries_max) catch unreachable; - - return ProtocolData{ - .stage = .header, - .captures = allocator.alloc(Capture, config.num_captures_max) catch unreachable, - .queries = queries, - .request = Request.init(allocator, .{ - .num_header_max = config.num_header_max, - .size_request_max = config.size_request_max, - .size_request_uri_max = config.size_request_uri_max, - }) catch unreachable, - .response = Response.init(allocator, .{ - .num_headers_max = config.num_header_max, - }) catch unreachable, - }; - } - - pub fn deinit(self: *ProtocolData, allocator: std.mem.Allocator) void { - self.request.deinit(); - self.response.deinit(); - self.queries.deinit(); - allocator.free(self.captures); - } - - pub fn clean(self: *ProtocolData) void { - self.response.clear(); - } -}; - -const testing = std.testing; - -test "ProtocolData deinit" { - const config: ProtocolConfig = .{ .router = undefined }; - var x = ProtocolData.init(testing.allocator, &config); - defer x.deinit(testing.allocator); - - try testing.expectEqual(x.stage, .header); -} diff --git a/src/http/provision.zig b/src/http/provision.zig new file mode 100644 index 0000000..7802468 --- /dev/null +++ b/src/http/provision.zig @@ -0,0 +1,66 @@ +const std = @import("std"); + +const Job = @import("../core/job.zig").Job; +const Capture = @import("routing_trie.zig").Capture; +const QueryMap = @import("routing_trie.zig").QueryMap; +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const Stage = @import("stage.zig").Stage; +const ServerConfig = @import("server.zig").ServerConfig; + +pub const Provision = struct { + index: usize, + job: Job, + socket: std.posix.socket_t, + buffer: []u8, + recv_buffer: std.ArrayList(u8), + arena: std.heap.ArenaAllocator, + captures: []Capture, + queries: QueryMap, + request: Request, + response: Response, + stage: Stage, + + pub fn init_hook(provisions: []Provision, config: anytype) void { + for (provisions) |*provision| { + provision.job = .empty; + provision.socket = undefined; + // Create Buffer + provision.buffer = config.allocator.alloc(u8, config.size_socket_buffer) catch { + @panic("attempting to statically allocate more memory than available. 
(Socket Buffer)");
+            };
+            // Create Recv Buffer
+            provision.recv_buffer = std.ArrayList(u8).init(config.allocator);
+            // Create the Context Arena
+            provision.arena = std.heap.ArenaAllocator.init(config.allocator);
+
+            provision.stage = .header;
+            provision.captures = config.allocator.alloc(Capture, config.num_captures_max) catch unreachable;
+
+            var queries = QueryMap.init(config.allocator);
+            queries.ensureTotalCapacity(config.num_queries_max) catch unreachable;
+            provision.queries = queries;
+
+            provision.request = Request.init(config.allocator, .{
+                .num_header_max = config.num_header_max,
+                .size_request_max = config.size_request_max,
+                .size_request_uri_max = config.size_request_uri_max,
+            }) catch unreachable;
+            provision.response = Response.init(config.allocator, .{
+                .num_headers_max = config.num_header_max,
+            }) catch unreachable;
+        }
+    }
+
+    pub fn deinit_hook(provisions: []Provision, allocator: anytype) void {
+        for (provisions) |*provision| {
+            allocator.free(provision.buffer);
+            provision.recv_buffer.deinit();
+            provision.arena.deinit();
+            provision.request.deinit();
+            provision.response.deinit();
+            provision.queries.deinit();
+            allocator.free(provision.captures);
+        }
+    }
+};
diff --git a/src/http/server.zig b/src/http/server.zig
index f8dd9dd..78b3088 100644
--- a/src/http/server.zig
+++ b/src/http/server.zig
@@ -1,35 +1,124 @@
 const std = @import("std");
-
 const builtin = @import("builtin");
 const assert = std.debug.assert;
-const panic = std.debug.panic;
 const log = std.log.scoped(.@"zzz/http/server");
 
-const Runtime = @import("tardy").Runtime;
-const AsyncIOType = @import("tardy").AsyncIOType;
-const Pool = @import("tardy").Pool;
+const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice;
 
-const Job = @import("../core/lib.zig").Job;
-const Pseudoslice = @import("../core/lib.zig").Pseudoslice;
+const TLSFileOptions = @import("../tls/lib.zig").TLSFileOptions;
+const TLSContext = @import("../tls/lib.zig").TLSContext;
+const TLS = @import("../tls/lib.zig").TLS;
 
+const Provision = @import("provision.zig").Provision;
+const Mime = @import("mime.zig").Mime;
+const Router = @import("router.zig").Router;
+const Context = @import("context.zig").Context;
 const HTTPError = @import("lib.zig").HTTPError;
-const Request = @import("lib.zig").Request;
-const Response = @import("lib.zig").Response;
-const Mime = @import("lib.zig").Mime;
-const Context = @import("lib.zig").Context;
-const Router = @import("lib.zig").Router;
-
-const Capture = @import("routing_trie.zig").Capture;
-const ProtocolData = @import("protocol.zig").ProtocolData;
-const ProtocolConfig = @import("protocol.zig").ProtocolConfig;
-const Security = @import("../core/server.zig").Security;
-const zzzConfig = @import("../core/server.zig").zzzConfig;
-const Provision = @import("../core/zprovision.zig").ZProvision(ProtocolData);
-
-const RecvStatus = @import("../core/server.zig").RecvStatus;
-const zzzServer = @import("../core/server.zig").Server;
+const Pool = @import("tardy").Pool;
+pub const Threading = @import("tardy").TardyThreading;
+pub const Runtime = @import("tardy").Runtime;
+pub const Task = @import("tardy").Task;
 const TaskFn = @import("tardy").TaskFn;
+pub const AsyncIOType = @import("tardy").AsyncIOType;
+const TardyCreator = @import("tardy").Tardy;
+const Cross = @import("tardy").Cross;
+
+pub const RecvStatus = union(enum) {
+    kill,
+    recv,
+    send: Pseudoslice,
+    spawned,
+};
+
+/// Security Model to use.
+///
+/// Default: .plain (plaintext)
+pub const Security = union(enum) {
+    plain,
+    tls: struct {
+        cert: TLSFileOptions,
+        key: TLSFileOptions,
+        cert_name: []const u8 = "CERTIFICATE",
+        key_name: []const u8 = "PRIVATE KEY",
+    },
+};
+
+/// These are various general configuration
+/// options that are important for the actual framework.
+///
+/// This includes various different options and limits
+/// for interacting with the underlying network.
+pub const ServerConfig = struct {
+    /// The allocator that the server will use.
+    allocator: std.mem.Allocator,
+    /// HTTP Request Router.
+    router: *Router,
+    /// Threading Model to use.
+    ///
+    /// Default: .auto
+    threading: Threading = .auto,
+    /// Kernel Backlog Value.
+    size_backlog: u31 = 512,
+    /// Number of Maximum Concurrent Connections.
+    ///
+    /// This is applied PER thread if using multi-threading.
+    /// zzz will drop/close any connections greater
+    /// than this.
+    ///
+    /// You want to tune this to your expected number
+    /// of maximum connections.
+    ///
+    /// Default: 1024
+    size_connections_max: u16 = 1024,
+    /// Maximum number of completions we can reap
+    /// with a single call of reap().
+    ///
+    /// Default: 256
+    size_completions_reap_max: u16 = 256,
+    /// Amount of allocated memory retained
+    /// after an arena is cleared.
+    ///
+    /// A higher value will increase memory usage but
+    /// should make allocators faster.
+    ///
+    /// A lower value will reduce memory usage but
+    /// will make allocators slower.
+    ///
+    /// Default: 1KB
+    size_connection_arena_retain: u32 = 1024,
+    /// Size of the buffer (in bytes) used for
+    /// interacting with the socket.
+    ///
+    /// Default: 4 KB.
+    size_socket_buffer: u32 = 1024 * 4,
+    /// Maximum size (in bytes) of the Recv buffer.
+    /// This is mainly a concern when you are reading in
+    /// large requests before responding.
+    ///
+    /// Default: 2MB.
+    size_recv_buffer_max: u32 = 1024 * 1024 * 2,
+    /// Maximum number of Headers in a Request/Response
+    ///
+    /// Default: 32
+    num_header_max: u32 = 32,
+    /// Maximum number of Captures in a Route
+    ///
+    /// Default: 8
+    num_captures_max: u32 = 8,
+    /// Maximum number of Queries in a URL
+    ///
+    /// Default: 8
+    num_queries_max: u32 = 8,
+    /// Maximum size (in bytes) of the Request.
+    ///
+    /// Default: 2MB.
+    size_request_max: u32 = 1024 * 1024 * 2,
+    /// Maximum size (in bytes) of the Request URI.
+    ///
+    /// Default: 2KB.
+    size_request_uri_max: u32 = 1024 * 2,
+};
 
 /// Uses the current p.response to generate and queue up the sending
 /// of a response. This is used when we already know what we want to send.
@@ -37,23 +126,23 @@ const TaskFn = @import("tardy").TaskFn; /// See: `route_and_respond` pub inline fn raw_respond(p: *Provision) !RecvStatus { { - const status_code: u16 = if (p.data.response.status) |status| @intFromEnum(status) else 0; - const status_name = if (p.data.response.status) |status| @tagName(status) else "No Status"; + const status_code: u16 = if (p.response.status) |status| @intFromEnum(status) else 0; + const status_name = if (p.response.status) |status| @tagName(status) else "No Status"; log.info("{d} - {d} {s}", .{ p.index, status_code, status_name }); } - const body = p.data.response.body orelse ""; - const header_buffer = try p.data.response.headers_into_buffer(p.buffer, @intCast(body.len)); - p.data.response.headers.clear(); + const body = p.response.body orelse ""; + const header_buffer = try p.response.headers_into_buffer(p.buffer, @intCast(body.len)); + p.response.headers.clear(); const pseudo = Pseudoslice.init(header_buffer, body, p.buffer); return .{ .send = pseudo }; } fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: *const Router) !RecvStatus { route: { - const found = router.get_route_from_host(p.data.request.uri, p.data.captures, &p.data.queries); + const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); if (found) |f| { - const handler = f.route.get_handler(p.data.request.method); + const handler = f.route.get_handler(p.request.method); if (handler) |func| { const context: *Context = try p.arena.allocator().create(Context); @@ -62,9 +151,9 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: trigger, runtime, p, - &p.data.request, - &p.data.response, - p.data.request.uri, + &p.request, + &p.response, + p.request.uri, f.captures, f.queries, ); @@ -73,7 +162,7 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: return .spawned; } else { // If we match the route but not the method. - p.data.response.set(.{ + p.response.set(.{ .status = .@"Method Not Allowed", .mime = Mime.HTML, .body = "405 Method Not Allowed", @@ -82,7 +171,7 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: // We also need to add to Allow header. // This uses the connection's arena to allocate 64 bytes. const allowed = f.route.get_allowed(p.arena.allocator()) catch { - p.data.response.set(.{ + p.response.set(.{ .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", @@ -91,8 +180,8 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: break :route; }; - p.data.response.headers.add("Allow", allowed) catch { - p.data.response.set(.{ + p.response.headers.add("Allow", allowed) catch { + p.response.set(.{ .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", @@ -106,7 +195,7 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: } // Didn't match any route. 
- p.data.response.set(.{ + p.response.set(.{ .status = .@"Not Found", .mime = Mime.HTML, .body = "404 Not Found", @@ -114,221 +203,1002 @@ fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: break :route; } - if (p.data.response.status == .Kill) { + if (p.response.status == .Kill) { return .kill; } return try raw_respond(p); } -pub fn recv_fn( - runtime: *Runtime, - trigger: TaskFn, - provision: *Provision, - p_config: *const ProtocolConfig, - z_config: *const zzzConfig, - recv_buffer: []const u8, -) RecvStatus { - _ = z_config; - - var stage = provision.data.stage; - const job = provision.job.recv; - - if (job.count >= p_config.size_request_max) { - provision.data.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); +pub fn Server( + comptime security: Security, + comptime async_type: AsyncIOType, +) type { + const TLSContextType = comptime if (security == .tls) TLSContext else void; + const TLSType = comptime if (security == .tls) ?TLS else void; + const Tardy = TardyCreator(async_type); - return raw_respond(provision) catch unreachable; - } + return struct { + const Self = @This(); + allocator: std.mem.Allocator, + tardy: Tardy, + config: ServerConfig, + addr: std.net.Address, + tls_ctx: TLSContextType, - switch (stage) { - .header => { - const start = provision.recv_buffer.items.len -| 4; - provision.recv_buffer.appendSlice(recv_buffer) catch unreachable; - const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); + pub fn init(config: ServerConfig) Self { + const tls_ctx = switch (comptime security) { + .tls => |inner| TLSContext.init(.{ + .allocator = config.allocator, + .cert = inner.cert, + .cert_name = inner.cert_name, + .key = inner.key, + .key_name = inner.key_name, + .size_tls_buffer_max = config.size_socket_buffer * 2, + }) catch unreachable, + .plain => void{}, + }; - // Basically, this means we haven't finished processing the header. - if (header_ends == null) { - log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); - return .recv; + return Self{ + .allocator = config.allocator, + .tardy = Tardy.init(.{ + .allocator = config.allocator, + .threading = config.threading, + .size_tasks_max = config.size_connections_max, + .size_aio_jobs_max = config.size_connections_max, + .size_aio_reap_max = config.size_completions_reap_max, + }) catch unreachable, + .config = config, + .addr = undefined, + .tls_ctx = tls_ctx, + }; + } + + pub fn deinit(self: *Self) void { + if (comptime security == .tls) { + self.tls_ctx.deinit(); } - log.debug("{d} - parsing header", .{provision.index}); - // The +4 is to account for the slice we match. - const header_end: u32 = @intCast(header_ends.? 
+ 4); - provision.data.request.parse_headers(provision.recv_buffer.items[0..header_end]) catch |e| { - switch (e) { - HTTPError.ContentTooLarge => { - provision.data.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); - }, - HTTPError.TooManyHeaders => { - provision.data.response.set(.{ - .status = .@"Request Header Fields Too Large", - .mime = Mime.HTML, - .body = "Too Many Headers", - }); - }, - HTTPError.MalformedRequest => { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Malformed Request", + self.tardy.deinit(); + } + + fn create_socket(self: *const Self) !std.posix.socket_t { + const socket: std.posix.socket_t = blk: { + const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; + break :blk try std.posix.socket( + self.addr.any.family, + socket_flags, + std.posix.IPPROTO.TCP, + ); + }; + + log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); + + if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT_LB, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEADDR, + &std.mem.toBytes(@as(c_int, 1)), + ); + } + + try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); + return socket; + } + + /// If you are using a custom implementation that does NOT rely + /// on TCP/IP, you can SKIP calling this method and just set the + /// socket value yourself. + /// + /// This is only allowed on certain targets that do not have TCP/IP + /// support. + pub fn bind(self: *Self, host: []const u8, port: u16) !void { + assert(host.len > 0); + assert(port > 0); + + self.addr = blk: { + switch (comptime builtin.os.tag) { + .windows => break :blk try std.net.Address.parseIp(host, port), + else => break :blk try std.net.Address.resolveIp(host, port), + } + }; + } + + fn close_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + assert(provision.job == .close); + const server_socket = rt.storage.get("server_socket", std.posix.socket_t); + const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); + const config = rt.storage.get_const_ptr("config", ServerConfig); + + log.info("{d} - closing connection", .{provision.index}); + + if (comptime security == .tls) { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + tls_ptr.*.?.deinit(); + tls_ptr.* = null; + } + + provision.socket = Cross.socket.INVALID_SOCKET; + provision.job = .empty; + _ = provision.arena.reset(.{ .retain_with_limit = config.size_connection_arena_retain }); + provision.response.clear(); + + // TODO: new config setting here! 
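+            // (The 1024-byte threshold below is what the TODO above wants to make configurable.)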
+ if (provision.recv_buffer.items.len > 1024) { + provision.recv_buffer.shrinkRetainingCapacity(1024); + } else { + provision.recv_buffer.clearRetainingCapacity(); + } + + pool.release(provision.index); + + const accept_queued = rt.storage.get_ptr("accept_queued", bool); + if (!accept_queued.*) { + accept_queued.* = true; + try rt.net.accept(.{ + .socket = server_socket, + .func = accept_task, + }); + } + } + + fn accept_task(rt: *Runtime, t: *const Task, _: ?*anyopaque) !void { + const child_socket = t.result.?.socket; + + const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); + const accept_queued = rt.storage.get_ptr("accept_queued", bool); + accept_queued.* = false; + + if (rt.scheduler.tasks.clean() >= 2) { + accept_queued.* = true; + const server_socket = rt.storage.get("server_socket", std.posix.socket_t); + try rt.net.accept(.{ + .socket = server_socket, + .func = accept_task, + }); + } + + if (!Cross.socket.is_valid(child_socket)) { + log.err("socket accept failed", .{}); + return error.AcceptFailed; + } + + // This should never fail. It means that we have a dangling item. + assert(pool.clean() > 0); + const borrowed = pool.borrow_hint(t.index) catch unreachable; + + log.info("{d} - accepting connection", .{borrowed.index}); + log.debug( + "empty provision slots: {d}", + .{pool.items.len - pool.dirty.count()}, + ); + assert(borrowed.item.job == .empty); + + try Cross.socket.disable_nagle(child_socket); + try Cross.socket.to_nonblock(child_socket); + + const provision = borrowed.item; + + // Store the index of this item. + provision.index = @intCast(borrowed.index); + provision.socket = child_socket; + + switch (comptime security) { + .tls => |_| { + const tls_ctx = rt.storage.get_const_ptr("tls_ctx", TLSContextType); + const tls_slice = rt.storage.get("tls_slice", []TLSType); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* == null); + + tls_ptr.* = tls_ctx.create(child_socket) catch |e| { + log.err("{d} - tls creation failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, }); - }, - HTTPError.URITooLong => { - provision.data.response.set(.{ - .status = .@"URI Too Long", - .mime = Mime.HTML, - .body = "URI Too Long", + return error.TLSCreationFailed; + }; + + const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { + log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, }); + return error.TLSStartHandshakeFailed; + }; + + provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; + try rt.net.recv(.{ + .socket = child_socket, + .buffer = recv_buf, + .func = handshake_task, + .ctx = borrowed.item, + }); + }, + .plain => { + provision.job = .{ .recv = .{ .count = 0 } }; + try rt.net.recv(.{ + .socket = child_socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = borrowed.item, + }); + }, + } + } + + /// This is the task you MUST trigger if the `recv_fn` returns `.spawned`. 
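+        /// It picks the connection back up after a handler has produced a response:
+        /// a pending `.recv` job queues another receive, while a `.send` job queues the
+        /// prepared response (encrypting it first when running under TLS).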
+ fn trigger_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + + switch (provision.job) { + else => unreachable, + .recv => { + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + }, + .send => |*send_job| { + const config = rt.storage.get_const_ptr("config", ServerConfig); + const plain_buffer = send_job.slice.get(0, config.size_socket_buffer); + + switch (comptime security) { + .tls => |_| { + const tls_slice: []TLSType = @as( + [*]TLSType, + @ptrCast(@alignCast(rt.storage.get("tls_slice").?)), + )[0..config.size_connections_max]; + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .fd = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSEncryptFailed; + }; + + send_job.count = plain_buffer.len; + send_job.security = .{ + .tls = .{ + .encrypted = encrypted_buffer, + .encrypted_count = 0, + }, + }; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = encrypted_buffer, + .func = send_task, + .ctx = provision, + }); + }, + .plain => { + send_job.security = .plain; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = plain_buffer, + .func = send_task, + .ctx = provision, + }); + }, + } + }, + } + } + + fn recv_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + assert(provision.job == .recv); + const length: i32 = t.result.?.value; + + const config = rt.storage.get_const_ptr("config", ServerConfig); + + const recv_job = &provision.job.recv; + + // If the socket is closed. 
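+            // A result of 0 means the peer hung up and a negative value is an error;
+            // either way we stop reading and queue the connection for close.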
+ if (length <= 0) { + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return; + } + + log.debug("{d} - recv triggered", .{provision.index}); + + const recv_count: usize = @intCast(length); + recv_job.count += recv_count; + const pre_recv_buffer = provision.buffer[0..recv_count]; + + const recv_buffer = blk: { + switch (comptime security) { + .tls => |_| { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { + log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSDecryptFailed; + }; }, - HTTPError.InvalidMethod => { - provision.data.response.set(.{ - .status = .@"Not Implemented", - .mime = Mime.HTML, - .body = "Not Implemented", + .plain => break :blk pre_recv_buffer, + } + }; + + var status: RecvStatus = status: { + var stage = provision.stage; + const job = provision.job.recv; + + if (job.count >= config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + + break :status raw_respond(provision) catch unreachable; + } + + switch (stage) { + .header => { + const start = provision.recv_buffer.items.len -| 4; + provision.recv_buffer.appendSlice(recv_buffer) catch unreachable; + const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); + + // Basically, this means we haven't finished processing the header. + if (header_ends == null) { + log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); + break :status .recv; + } + + log.debug("{d} - parsing header", .{provision.index}); + // The +4 is to account for the slice we match. + const header_end: u32 = @intCast(header_ends.? + 4); + provision.request.parse_headers(provision.recv_buffer.items[0..header_end]) catch |e| { + switch (e) { + HTTPError.ContentTooLarge => { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + }, + HTTPError.TooManyHeaders => { + provision.response.set(.{ + .status = .@"Request Header Fields Too Large", + .mime = Mime.HTML, + .body = "Too Many Headers", + }); + }, + HTTPError.MalformedRequest => { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Malformed Request", + }); + }, + HTTPError.URITooLong => { + provision.response.set(.{ + .status = .@"URI Too Long", + .mime = Mime.HTML, + .body = "URI Too Long", + }); + }, + HTTPError.InvalidMethod => { + provision.response.set(.{ + .status = .@"Not Implemented", + .mime = Mime.HTML, + .body = "Not Implemented", + }); + }, + HTTPError.HTTPVersionNotSupported => { + provision.response.set(.{ + .status = .@"HTTP Version Not Supported", + .mime = Mime.HTML, + .body = "HTTP Version Not Supported", + }); + }, + } + + break :status raw_respond(provision) catch unreachable; + }; + + // Logging information about Request. + log.info("{d} - \"{s} {s}\" {s}", .{ + provision.index, + @tagName(provision.request.method), + provision.request.uri, + provision.request.headers.get("User-Agent") orelse "N/A", }); + + // HTTP/1.1 REQUIRES a Host header to be present. 
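+                        // Requests that claim HTTP/1.1 but omit it are rejected with 400 Bad Request.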
+ const is_http_1_1 = provision.request.version == .@"HTTP/1.1"; + const is_host_present = provision.request.headers.get("Host") != null; + if (is_http_1_1 and !is_host_present) { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Missing \"Host\" Header", + }); + + break :status raw_respond(provision) catch unreachable; + } + + if (!provision.request.expect_body()) { + break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + } + + // Everything after here is a Request that is expecting a body. + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + break :blk 0; + }; + + break :blk std.fmt.parseInt(u32, length_string, 10) catch { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "", + }); + + break :status raw_respond(provision) catch unreachable; + }; + }; + + if (header_end < provision.recv_buffer.items.len) { + const difference = provision.recv_buffer.items.len - header_end; + if (difference == content_length) { + // Whole Body + log.debug("{d} - got whole body with header", .{provision.index}); + const body_end = header_end + difference; + provision.request.set_body(provision.recv_buffer.items[header_end..body_end]); + break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + } else { + // Partial Body + log.debug("{d} - got partial body with header", .{provision.index}); + stage = .{ .body = header_end }; + break :status .recv; + } + } else if (header_end == provision.recv_buffer.items.len) { + // Body of length 0 probably or only got header. + if (content_length == 0) { + log.debug("{d} - got body of length 0", .{provision.index}); + // Body of Length 0. + provision.request.set_body(""); + break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + } else { + // Got only header. + log.debug("{d} - got all header aka no body", .{provision.index}); + stage = .{ .body = header_end }; + break :status .recv; + } + } else unreachable; }, - HTTPError.HTTPVersionNotSupported => { - provision.data.response.set(.{ - .status = .@"HTTP Version Not Supported", - .mime = Mime.HTML, - .body = "HTTP Version Not Supported", - }); + + .body => |header_end| { + // We should ONLY be here if we expect there to be a body. + assert(provision.request.expect_body()); + log.debug("{d} - body matching trigger_tasked", .{provision.index}); + + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + provision.response.set(.{ + .status = .@"Length Required", + .mime = Mime.HTML, + .body = "", + }); + + break :status raw_respond(provision) catch unreachable; + }; + + break :blk std.fmt.parseInt(u32, length_string, 10) catch { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "", + }); + + break :status raw_respond(provision) catch unreachable; + }; + }; + + const request_length = header_end + content_length; + + // If this body will be too long, abort early. 
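+                        // request_length covers the header bytes plus the declared Content-Length.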
+ if (request_length > config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "", + }); + break :status raw_respond(provision) catch unreachable; + } + + if (job.count >= request_length) { + provision.request.set_body(provision.recv_buffer.items[header_end..request_length]); + break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + } else { + break :status .recv; + } }, } - - return raw_respond(provision) catch unreachable; }; - // Logging information about Request. - log.info("{d} - \"{s} {s}\" {s}", .{ - provision.index, - @tagName(provision.data.request.method), - provision.data.request.uri, - provision.data.request.headers.get("User-Agent") orelse "N/A", - }); - - // HTTP/1.1 REQUIRES a Host header to be present. - const is_http_1_1 = provision.data.request.version == .@"HTTP/1.1"; - const is_host_present = provision.data.request.headers.get("Host") != null; - if (is_http_1_1 and !is_host_present) { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Missing \"Host\" Header", - }); + switch (status) { + .spawned => return, + .kill => { + rt.stop(); + return error.Killed; + }, + .recv => { + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + }, + .send => |*pslice| { + const plain_buffer = pslice.get(0, config.size_socket_buffer); - return raw_respond(provision) catch unreachable; - } + switch (comptime security) { + .tls => |_| { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSEncryptFailed; + }; - if (!provision.data.request.expect_body()) { - return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; + provision.job = .{ + .send = .{ + .slice = pslice.*, + .count = @intCast(plain_buffer.len), + .security = .{ + .tls = .{ + .encrypted = encrypted_buffer, + .encrypted_count = 0, + }, + }, + }, + }; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = encrypted_buffer, + .func = send_task, + .ctx = provision, + }); + }, + .plain => { + provision.job = .{ + .send = .{ + .slice = pslice.*, + .count = 0, + .security = .plain, + }, + }; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = plain_buffer, + .func = send_task, + .ctx = provision, + }); + }, + } + }, } + } - // Everything after here is a Request that is expecting a body. 
- const content_length = blk: { - const length_string = provision.data.request.headers.get("Content-Length") orelse { - break :blk 0; - }; + fn handshake_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { + log.debug("Handshake Task", .{}); + assert(security == .tls); + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + const length: i32 = t.result.?.value; - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); + if (comptime security == .tls) { + const tls_slice = rt.storage.get("tls_slice", []TLSType); - return raw_respond(provision) catch unreachable; - }; - }; + assert(provision.job == .handshake); + const handshake_job = &provision.job.handshake; - if (header_end < provision.recv_buffer.items.len) { - const difference = provision.recv_buffer.items.len - header_end; - if (difference == content_length) { - // Whole Body - log.debug("{d} - got whole body with header", .{provision.index}); - const body_end = header_end + difference; - provision.data.request.set_body(provision.recv_buffer.items[header_end..body_end]); - return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; - } else { - // Partial Body - log.debug("{d} - got partial body with header", .{provision.index}); - stage = .{ .body = header_end }; - return .recv; + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + log.debug("processing handshake", .{}); + handshake_job.count += 1; + + if (length <= 0) { + log.debug("handshake connection closed", .{}); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSHandshakeClosed; } - } else if (header_end == provision.recv_buffer.items.len) { - // Body of length 0 probably or only got header. - if (content_length == 0) { - log.debug("{d} - got body of length 0", .{provision.index}); - // Body of Length 0. - provision.data.request.set_body(""); - return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; - } else { - // Got only header. - log.debug("{d} - got all header aka no body", .{provision.index}); - stage = .{ .body = header_end }; - return .recv; + + if (handshake_job.count >= 50) { + log.debug("handshake taken too many cycles", .{}); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSHandshakeTooManyCycles; } - } else unreachable; - }, - .body => |header_end| { - // We should ONLY be here if we expect there to be a body. 
- assert(provision.data.request.expect_body()); - log.debug("{d} - body matching triggered", .{provision.index}); + const hs_length: usize = @intCast(length); - const content_length = blk: { - const length_string = provision.data.request.headers.get("Content-Length") orelse { - provision.data.response.set(.{ - .status = .@"Length Required", - .mime = Mime.HTML, - .body = "", - }); + switch (handshake_job.state) { + .recv => { + // on recv, we want to read from socket and feed into tls engien + const hstate = tls_ptr.*.?.continue_handshake( + .{ .recv = @intCast(hs_length) }, + ) catch |e| { + log.err("{d} - tls handshake on recv failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSHandshakeRecvFailed; + }; - return raw_respond(provision) catch unreachable; - }; + switch (hstate) { + .recv => |buf| { + log.debug("requeing recv in handshake", .{}); + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = buf, + .func = handshake_task, + .ctx = provision, + }); + }, + .send => |buf| { + log.debug("queueing send in handshake", .{}); + handshake_job.state = .send; + try rt.net.send(.{ + .socket = provision.socket, + .buffer = buf, + .func = handshake_task, + .ctx = provision, + }); + }, + .complete => { + log.debug("handshake complete", .{}); + provision.job = .{ .recv = .{ .count = 0 } }; + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + }, + } + }, + .send => { + // on recv, we want to read from socket and feed into tls engien + const hstate = tls_ptr.*.?.continue_handshake( + .{ .send = @intCast(hs_length) }, + ) catch |e| { + log.err("{d} - tls handshake on send failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSHandshakeSendFailed; + }; - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.data.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); + switch (hstate) { + .recv => |buf| { + handshake_job.state = .recv; + log.debug("queuing recv in handshake", .{}); + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = buf, + .func = handshake_task, + .ctx = provision, + }); + }, + .send => |buf| { + log.debug("requeing send in handshake", .{}); + try rt.net.send(.{ + .socket = provision.socket, + .buffer = buf, + .func = handshake_task, + .ctx = provision, + }); + }, + .complete => { + log.debug("handshake complete", .{}); + provision.job = .{ .recv = .{ .count = 0 } }; + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + }, + } + }, + } + } else unreachable; + } - return raw_respond(provision) catch unreachable; - }; - }; + fn send_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { + const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + assert(provision.job == .send); + const length: i32 = t.result.?.value; - const request_length = header_end + content_length; + const config = rt.storage.get_const_ptr("config", ServerConfig); - // If this body will be too long, abort early. - if (request_length > p_config.size_request_max) { - provision.data.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "", + // If the socket is closed. 
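+            // As with recv, a result of <= 0 means the peer is gone, so we close rather than retry.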
+ if (length <= 0) { + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, }); - return raw_respond(provision) catch unreachable; + return; } - if (job.count >= request_length) { - provision.data.request.set_body(provision.recv_buffer.items[header_end..request_length]); - return route_and_respond(runtime, trigger, provision, p_config.router) catch unreachable; - } else { - return .recv; + const send_job = &provision.job.send; + + log.debug("{d} - send triggered", .{provision.index}); + const send_count: usize = @intCast(length); + log.debug("{d} - send length: {d}", .{ provision.index, send_count }); + + switch (comptime security) { + .tls => { + assert(send_job.security == .tls); + + const tls_slice = rt.storage.get("tls_slice", []TLSType); + + const job_tls = &send_job.security.tls; + job_tls.encrypted_count += send_count; + + if (job_tls.encrypted_count >= job_tls.encrypted.len) { + if (send_job.count >= send_job.slice.len) { + // All done sending. + log.debug("{d} - queueing a new recv", .{provision.index}); + _ = provision.arena.reset(.{ + .retain_with_limit = config.size_connection_arena_retain, + }); + provision.recv_buffer.clearRetainingCapacity(); + provision.job = .{ .recv = .{ .count = 0 } }; + + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + } else { + // Queue a new chunk up for sending. + log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); + + const inner_slice = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); + + send_job.count += @intCast(inner_slice.len); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(.{ + .socket = provision.socket, + .func = close_task, + .ctx = provision, + }); + return error.TLSEncryptFailed; + }; + + job_tls.encrypted = encrypted; + job_tls.encrypted_count = 0; + + try rt.net.send(.{ + .socket = provision.socket, + .buffer = job_tls.encrypted, + .func = send_task, + .ctx = provision, + }); + } + } else { + log.debug( + "{d} - sending next encrypted chunk starting at index {d}", + .{ provision.index, job_tls.encrypted_count }, + ); + + const remainder = job_tls.encrypted[job_tls.encrypted_count..]; + try rt.net.send(.{ + .socket = provision.socket, + .buffer = remainder, + .func = send_task, + .ctx = provision, + }); + } + }, + .plain => { + assert(send_job.security == .plain); + send_job.count += send_count; + + if (send_job.count >= send_job.slice.len) { + log.debug("{d} - queueing a new recv", .{provision.index}); + _ = provision.arena.reset(.{ + .retain_with_limit = config.size_connection_arena_retain, + }); + provision.recv_buffer.clearRetainingCapacity(); + provision.job = .{ .recv = .{ .count = 0 } }; + + try rt.net.recv(.{ + .socket = provision.socket, + .buffer = provision.buffer, + .func = recv_task, + .ctx = provision, + }); + } else { + log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); + + const plain_buffer = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); + + log.debug("{d} - chunk ends at: {d}", .{ + provision.index, + plain_buffer.len + send_job.count, + }); + + try rt.net.send(.{ + 
.socket = provision.socket, + .buffer = plain_buffer, + .func = send_task, + .ctx = provision, + }); + } + }, } - }, - } -} + } + + pub fn listen(self: *Self) !void { + log.info("server listening...", .{}); + log.info("security mode: {s}", .{@tagName(security)}); + + try self.tardy.entry( + struct { + fn rt_start(rt: *Runtime, alloc: std.mem.Allocator, zzz: *Self) !void { + const socket = try zzz.create_socket(); + try std.posix.listen(socket, zzz.config.size_backlog); + + const provision_pool = try alloc.create(Pool(Provision)); + provision_pool.* = try Pool(Provision).init( + alloc, + zzz.config.size_connections_max, + Provision.init_hook, + zzz.config, + ); + + try rt.storage.store_ptr("provision_pool", provision_pool); + try rt.storage.store_ptr("config", &zzz.config); + + if (comptime security == .tls) { + const tls_slice = try alloc.alloc( + TLSType, + zzz.config.size_connections_max, + ); + if (comptime security == .tls) { + for (tls_slice) |*tls| { + tls.* = null; + } + } -pub fn Server(comptime security: Security, comptime async_type: AsyncIOType) type { - return zzzServer(security, async_type, ProtocolData, ProtocolConfig, recv_fn); + // since slices are fat pointers... + try rt.storage.store_alloc("tls_slice", tls_slice); + try rt.storage.store_ptr("tls_ctx", zzz.tls_ctx); + } + + try rt.storage.store_alloc("server_socket", socket); + try rt.storage.store_alloc("accept_queued", true); + + try rt.net.accept(.{ + .socket = socket, + .func = accept_task, + }); + } + }.rt_start, + self, + struct { + fn rt_end(rt: *Runtime, alloc: std.mem.Allocator, _: anytype) void { + // clean up socket. + const server_socket = rt.storage.get("server_socket", std.posix.socket_t); + std.posix.close(server_socket); + + // clean up provision pool. + const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); + provision_pool.deinit(Provision.deinit_hook, alloc); + alloc.destroy(provision_pool); + + // clean up TLS. 
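+                        // (The TLS slice is only allocated when the server is instantiated with security == .tls.)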
+ if (comptime security == .tls) { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + alloc.free(tls_slice); + } + } + }.rt_end, + void, + ); + } + }; } From c8487e5322ff74977780ff020fb09e1fa9d9c7f7 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sat, 26 Oct 2024 15:38:59 -0700 Subject: [PATCH 04/17] feat(tardy): use new context style --- build.zig.zon | 5 +- examples/http/basic/main.zig | 11 +- examples/http/benchmark/main.zig | 16 +- examples/http/fs/main.zig | 24 +- examples/http/minram/main.zig | 23 +- examples/http/multithread/main.zig | 25 +- examples/http/tls/main.zig | 43 +- examples/http/valgrind/main.zig | 18 +- src/http/context.zig | 91 ++-- src/http/lib.zig | 4 - src/http/route.zig | 208 ++++---- src/http/router.zig | 468 +++++++++--------- src/http/routing_trie.zig | 766 +++++++++++++++-------------- src/http/server.zig | 746 +++++++++++++--------------- 14 files changed, 1201 insertions(+), 1247 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index d907180..e203715 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,8 +4,9 @@ .minimum_zig_version = "0.13.0", .dependencies = .{ .tardy = .{ - .url = "git+https://github.com/mookums/tardy#12a2bcae25b34c4eb34ab5e3b5db101823a61cd6", - .hash = "122073200a2412251ad1e7eb322d9d04868a1444f98bdb4d47bb630491806c8d36d4", + //.url = "git+https://github.com/mookums/tardy#12a2bcae25b34c4eb34ab5e3b5db101823a61cd6", + //.hash = "122073200a2412251ad1e7eb322d9d04868a1444f98bdb4d47bb630491806c8d36d4", + .path = "../tardy", }, .bearssl = .{ .url = "https://github.com/mookums/bearssl-zig/archive/37a96eee56fe2543579bbc6da148ca886f3dd32b.tar.gz", diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index 9181d97..2e25101 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -3,6 +3,11 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/basic"); +const Server = http.Server(.plain, .auto); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; + pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; @@ -11,11 +16,11 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { const body = \\ \\ diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index cba7b61..6e289d4 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -3,11 +3,16 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/benchmark"); +const Server = http.Server(.plain, .auto); +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + pub const std_options = .{ .log_level = .err, }; -fn hi_handler(ctx: *http.Context) void { +fn hi_handler(ctx: *Context) void { const name = ctx.captures[0].string; const body = std.fmt.allocPrint(ctx.allocator, @@ -50,17 +55,18 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try 
router.serve_route("/hi/%s", http.Route.init().get(hi_handler)); + try router.serve_route("/hi/%s", Route.init().get(hi_handler)); - var server = http.Server(.plain, .auto).init(.{ + var server = Server.init(.{ + .router = &router, .allocator = allocator, .threading = .auto, }); defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig index 13bf4f3..63b332b 100644 --- a/examples/http/fs/main.zig +++ b/examples/http/fs/main.zig @@ -3,19 +3,26 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/fs"); +const Server = http.Server(.plain, .auto); +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){ .backing_allocator = std.heap.c_allocator }; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { const body = \\ \\ @@ -33,8 +40,8 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/kill", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { ctx.runtime.stop(); ctx.respond(.{ @@ -47,7 +54,8 @@ pub fn main() !void { try router.serve_fs_dir("/static", "./examples/http/fs/static"); - var server = http.Server(.plain, .auto).init(.{ + var server = Server.init(.{ + .router = &router, .allocator = allocator, .threading = .auto, .size_connections_max = 256, @@ -55,5 +63,5 @@ pub fn main() !void { defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig index df23810..6b56221 100644 --- a/examples/http/minram/main.zig +++ b/examples/http/minram/main.zig @@ -3,6 +3,11 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/minram"); +const Server = http.Server(.plain, .auto); +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; @@ -13,11 +18,11 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { const body = \\ \\ @@ -35,7 +40,8 @@ pub fn main() !void { } }.handler_fn)); - var server = http.Server(.plain, .auto).init(.{ + var server = Server.init(.{ + .router = &router, .allocator = allocator, .threading = .single, .size_backlog = 32, @@ -43,14 +49,13 @@ pub fn main() !void { .size_connection_arena_retain = 64, .size_completions_reap_max 
= 8, .size_socket_buffer = 512, - }); - - try server.bind(host, port); - try server.listen(.{ - .router = &router, .num_header_max = 32, .num_captures_max = 0, .size_request_max = 2048, .size_request_uri_max = 256, }); + defer server.deinit(); + + try server.bind(host, port); + try server.listen(); } diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig index 7ba9245..0a8b864 100644 --- a/examples/http/multithread/main.zig +++ b/examples/http/multithread/main.zig @@ -3,7 +3,13 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/multithread"); -fn hi_handler(ctx: *http.Context) void { +const Server = http.Server(.plain, .auto); + +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + +fn hi_handler(ctx: *Context) void { const name = ctx.captures[0].string; const greeting = ctx.queries.get("greeting") orelse "Hi"; @@ -40,7 +46,7 @@ fn hi_handler(ctx: *http.Context) void { }); } -fn redir_handler(ctx: *http.Context) void { +fn redir_handler(ctx: *Context) void { ctx.response.headers.add("Location", "/hi/redirect") catch unreachable; ctx.respond(.{ .status = .@"Permanent Redirect", @@ -49,7 +55,7 @@ fn redir_handler(ctx: *http.Context) void { }); } -fn post_handler(ctx: *http.Context) void { +fn post_handler(ctx: *Context) void { log.debug("Body: {s}", .{ctx.request.body}); ctx.respond(.{ @@ -70,20 +76,21 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/hi/%s", http.Route.init().get(hi_handler)); - try router.serve_route("/redirect", http.Route.init().get(redir_handler)); - try router.serve_route("/post", http.Route.init().post(post_handler)); + try router.serve_route("/hi/%s", Route.init().get(hi_handler)); + try router.serve_route("/redirect", Route.init().get(redir_handler)); + try router.serve_route("/post", Route.init().post(post_handler)); - var server = http.Server(.plain, .auto).init(.{ + var server = Server.init(.{ + .router = &router, .allocator = allocator, .threading = .auto, }); defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig index 738fbf1..219255b 100644 --- a/examples/http/tls/main.zig +++ b/examples/http/tls/main.zig @@ -2,6 +2,20 @@ const std = @import("std"); const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/tls"); + +const Server = http.Server(.{ + .tls = .{ + .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, + .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, + .cert_name = "CERTIFICATE", + .key_name = "EC PRIVATE KEY", + }, +}, .auto); + +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; @@ -12,13 +26,13 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(ctx: 
*http.Context) void { + try router.serve_route("/", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { const body = \\ \\ @@ -39,8 +53,8 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/kill", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { ctx.respond(.{ .status = .Kill, .mime = http.Mime.HTML, @@ -49,26 +63,13 @@ pub fn main() !void { } }.handler_fn)); - var server = http.Server( - .{ - .tls = .{ - .cert = .{ - .file = .{ .path = "./examples/http/tls/certs/cert.pem" }, - }, - .key = .{ - .file = .{ .path = "./examples/http/tls/certs/key.pem" }, - }, - .cert_name = "CERTIFICATE", - .key_name = "EC PRIVATE KEY", - }, - }, - .auto, - ).init(.{ + var server = Server.init(.{ + .router = &router, .allocator = allocator, .threading = .single, }); defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig index 89d313c..5d6d3a6 100644 --- a/examples/http/valgrind/main.zig +++ b/examples/http/valgrind/main.zig @@ -3,6 +3,11 @@ const zzz = @import("zzz"); const http = zzz.HTTP; const log = std.log.scoped(.@"examples/valgrind"); +const Server = http.Server(.plain, .auto); +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; + pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; @@ -11,11 +16,11 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { const body = \\ \\ @@ -33,8 +38,8 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(ctx: *http.Context) void { + try router.serve_route("/kill", Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { ctx.respond(.{ .status = .Kill, .mime = http.Mime.HTML, @@ -44,11 +49,12 @@ pub fn main() !void { }.handler_fn)); var server = http.Server(.plain, .auto).init(.{ + .router = &router, .allocator = allocator, .threading = .single, }); defer server.deinit(); try server.bind(host, port); - try server.listen(.{ .router = &router }); + try server.listen(); } diff --git a/src/http/context.zig b/src/http/context.zig index d3356f0..9bd3c5d 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -4,7 +4,6 @@ const log = std.log.scoped(.@"zzz/http/context"); const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; - const Provision = @import("provision.zig").Provision; const Request = @import("request.zig").Request; @@ -13,69 +12,47 @@ const ResponseSetOptions = Response.ResponseSetOptions; const Runtime = @import("tardy").Runtime; const Task = @import("tardy").Task; -// Needed here to prevent a dependency loop. -const TaskFn = *const fn (*Runtime, *const Task, ?*anyopaque) anyerror!void; const raw_respond = @import("server.zig").raw_respond; -pub const Context = struct { - allocator: std.mem.Allocator, - trigger: TaskFn, - runtime: *Runtime, - /// The Request that triggered this handler. 
- request: *const Request, - /// The Response that will be returned. - /// To actually trigger the send, use `Context.respond`. - response: *Response, - path: []const u8, - captures: []Capture, - queries: *QueryMap, - provision: *Provision, - triggered: bool = false, - - pub fn init( +// Context is dependent on the server that gets created. +// This is because the trigger_task ends up being dependent. +pub fn Context(comptime Server: type) type { + return struct { + const Self = @This(); allocator: std.mem.Allocator, - trigger: TaskFn, runtime: *Runtime, - ctx: *Provision, + /// The Request that triggered this handler. request: *const Request, + /// The Response that will be returned. response: *Response, path: []const u8, captures: []Capture, queries: *QueryMap, - ) Context { - return Context{ - .allocator = allocator, - .trigger = trigger, - .runtime = runtime, - .provision = ctx, - .request = request, - .response = response, - .path = path, - .captures = captures, - .queries = queries, - }; - } - - pub fn respond(self: *Context, options: ResponseSetOptions) void { - assert(!self.triggered); - self.triggered = true; - self.response.set(options); - - // this will write the data into the appropriate places. - const status = raw_respond(self.provision) catch unreachable; - - self.provision.job = .{ - .send = .{ - .count = 0, - .slice = status.send, - .security = undefined, - }, - }; - - self.runtime.spawn(.{ - .func = self.trigger, - .ctx = self.provision, - }) catch unreachable; - } -}; + provision: *Provision, + triggered: bool = false, + + pub fn respond(self: *Self, options: ResponseSetOptions) void { + assert(!self.triggered); + self.triggered = true; + self.response.set(options); + + // this will write the data into the appropriate places. + const status = raw_respond(self.provision) catch unreachable; + + self.provision.job = .{ + .send = .{ + .count = 0, + .slice = status.send, + .security = undefined, + }, + }; + + self.runtime.spawn( + *Provision, + Server.trigger_task, + self.provision, + ) catch unreachable; + } + }; +} diff --git a/src/http/lib.zig b/src/http/lib.zig index 468cdd1..e3e9b3a 100644 --- a/src/http/lib.zig +++ b/src/http/lib.zig @@ -4,10 +4,6 @@ pub const Request = @import("request.zig").Request; pub const Response = @import("response.zig").Response; pub const Mime = @import("mime.zig").Mime; pub const Date = @import("date.zig").Date; -pub const Route = @import("route.zig").Route; -pub const Router = @import("router.zig").Router; -pub const RouteHandlerFn = @import("route.zig").RouteHandlerFn; -pub const Context = @import("context.zig").Context; pub const Headers = @import("headers.zig").Headers; pub const Server = @import("server.zig").Server; diff --git a/src/http/route.zig b/src/http/route.zig index 2b63d6a..d00d09b 100644 --- a/src/http/route.zig +++ b/src/http/route.zig @@ -6,112 +6,114 @@ const Response = @import("response.zig").Response; const Context = @import("context.zig").Context; -pub const RouteHandlerFn = *const fn (context: *Context) void; - -pub const Route = struct { - handlers: [9]?RouteHandlerFn = [_]?RouteHandlerFn{null} ** 9, - - fn method_to_index(method: Method) u32 { - return switch (method) { - .GET => 0, - .HEAD => 1, - .POST => 2, - .PUT => 3, - .DELETE => 4, - .CONNECT => 5, - .OPTIONS => 6, - .TRACE => 7, - .PATCH => 8, - }; - } - - pub fn init() Route { - return Route{ .handlers = [_]?RouteHandlerFn{null} ** 9 }; - } - - /// Returns a comma delinated list of allowed Methods for this route. 
This - /// is meant to be used as the value for the 'Allow' header in the Response. - pub fn get_allowed(self: Route, allocator: std.mem.Allocator) ![]const u8 { - // This gets allocated within the context of the connection's arena. - const allowed_size = comptime blk: { - var size = 0; - for (std.meta.tags(Method)) |method| { - size += @tagName(method).len + 1; - } - break :blk size; - }; +pub fn Route(comptime Server: type) type { + return struct { + const Self = @This(); + pub const HandlerFn = *const fn (context: *Context(Server)) void; + handlers: [9]?HandlerFn = [_]?HandlerFn{null} ** 9, + + fn method_to_index(method: Method) u32 { + return switch (method) { + .GET => 0, + .HEAD => 1, + .POST => 2, + .PUT => 3, + .DELETE => 4, + .CONNECT => 5, + .OPTIONS => 6, + .TRACE => 7, + .PATCH => 8, + }; + } + + pub fn init() Self { + return Self{ .handlers = [_]?HandlerFn{null} ** 9 }; + } - const buffer = try allocator.alloc(u8, allowed_size); + /// Returns a comma delinated list of allowed Methods for this route. This + /// is meant to be used as the value for the 'Allow' header in the Response. + pub fn get_allowed(self: Self, allocator: std.mem.Allocator) ![]const u8 { + // This gets allocated within the context of the connection's arena. + const allowed_size = comptime blk: { + var size = 0; + for (std.meta.tags(Method)) |method| { + size += @tagName(method).len + 1; + } + break :blk size; + }; + + const buffer = try allocator.alloc(u8, allowed_size); + + var current: []u8 = ""; + inline for (std.meta.tags(Method)) |method| { + if (self.handlers[@intFromEnum(method)] != null) { + current = std.fmt.bufPrint(buffer, "{s},{s}", .{ @tagName(method), current }) catch unreachable; + } + } - var current: []u8 = ""; - inline for (std.meta.tags(Method)) |method| { - if (self.handlers[@intFromEnum(method)] != null) { - current = std.fmt.bufPrint(buffer, "{s},{s}", .{ @tagName(method), current }) catch unreachable; + if (current.len == 0) { + return current; + } else { + return current[0 .. current.len - 1]; } } - if (current.len == 0) { - return current; - } else { - return current[0 .. 
current.len - 1]; + pub fn get_handler(self: Self, method: Method) ?HandlerFn { + return self.handlers[method_to_index(method)]; + } + + pub fn get(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.GET)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn head(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.HEAD)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn post(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.POST)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn put(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.PUT)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn delete(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.DELETE)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn connect(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.CONNECT)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn options(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.OPTIONS)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn trace(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.TRACE)] = handler_fn; + return Self{ .handlers = new_handlers }; + } + + pub fn patch(self: Self, handler_fn: HandlerFn) Self { + var new_handlers = self.handlers; + new_handlers[comptime method_to_index(.PATCH)] = handler_fn; + return Self{ .handlers = new_handlers }; } - } - - pub fn get_handler(self: Route, method: Method) ?RouteHandlerFn { - return self.handlers[method_to_index(method)]; - } - - pub fn get(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.GET)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn head(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.HEAD)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn post(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.POST)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn put(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PUT)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn delete(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.DELETE)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn connect(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.CONNECT)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn options(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.OPTIONS)] = handler_fn; - return 
Route{ .handlers = new_handlers }; - } - - pub fn trace(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.TRACE)] = handler_fn; - return Route{ .handlers = new_handlers }; - } - - pub fn patch(self: Route, handler_fn: RouteHandlerFn) Route { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PATCH)] = handler_fn; - return Route{ .handlers = new_handlers }; - } -}; + }; +} diff --git a/src/http/router.zig b/src/http/router.zig index 4824781..a4c6434 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -2,263 +2,271 @@ const std = @import("std"); const builtin = @import("builtin"); const log = std.log.scoped(.@"zzz/http/router"); const assert = std.debug.assert; -const Route = @import("route.zig").Route; + +const _Route = @import("route.zig").Route; + const Capture = @import("routing_trie.zig").Capture; -const FoundRoute = @import("routing_trie.zig").FoundRoute; const Request = @import("request.zig").Request; const Response = @import("response.zig").Response; const Mime = @import("mime.zig").Mime; -const Context = @import("context.zig").Context; +const _Context = @import("context.zig").Context; -const RoutingTrie = @import("routing_trie.zig").RoutingTrie; +const _RoutingTrie = @import("routing_trie.zig").RoutingTrie; const QueryMap = @import("routing_trie.zig").QueryMap; const Runtime = @import("tardy").Runtime; const Task = @import("tardy").Task; -pub const Router = struct { - allocator: std.mem.Allocator, - routes: RoutingTrie, - /// This makes the router immutable, also making it - /// thread-safe when shared. - locked: bool = false, - - pub fn init(allocator: std.mem.Allocator) Router { - const routes = RoutingTrie.init(allocator) catch unreachable; - return Router{ .allocator = allocator, .routes = routes, .locked = false }; - } - - pub fn deinit(self: *Router) void { - self.routes.deinit(); - } - - const FileProvision = struct { - mime: Mime, - context: *Context, - fd: std.posix.fd_t, - offset: usize, - list: std.ArrayList(u8), - buffer: []u8, - }; +pub fn Router(comptime Server: type) type { + return struct { + const Self = @This(); + const RoutingTrie = _RoutingTrie(Server); + const FoundRoute = RoutingTrie.FoundRoute; + const Route = _Route(Server); + const Context = _Context(Server); + allocator: std.mem.Allocator, + routes: RoutingTrie, + /// This makes the router immutable, also making it + /// thread-safe when shared. 
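// A condensed wiring sketch (not part of this patch); host, port, and
// handler names are illustrative. Router, Route, and Context are now
// reached through the concrete Server type, and the router is handed to
// Server.init instead of to listen().
const std = @import("std");
const zzz = @import("zzz");
const http = zzz.HTTP;

const Server = http.Server(.plain, .auto);
const Router = Server.Router;
const Route = Server.Route;
const Context = Server.Context;

fn hello_handler(ctx: *Context) void {
    ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = "hello" });
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();

    var router = Router.init(allocator);
    defer router.deinit();
    try router.serve_route("/", Route.init().get(hello_handler));

    // Because Route's HandlerFn is parameterized over the Server, a handler
    // written for one Server instantiation cannot be registered on a router
    // that belongs to a different one.
    var server = Server.init(.{
        .router = &router,
        .allocator = allocator,
        .threading = .single,
    });
    defer server.deinit();

    try server.bind("0.0.0.0", 9862);
    try server.listen();
}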
+ locked: bool = false, + + pub fn init(allocator: std.mem.Allocator) Self { + const routes = RoutingTrie.init(allocator) catch unreachable; + return Self{ .allocator = allocator, .routes = routes, .locked = false }; + } - fn open_file_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); - errdefer { - provision.context.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); + pub fn deinit(self: *Self) void { + self.routes.deinit(); } - const fd = t.result.?.fd; - if (fd <= -1) { - provision.context.respond(.{ - .status = .@"Not Found", - .mime = Mime.HTML, - .body = "File Not Found", - }); - return; + const FileProvision = struct { + mime: Mime, + context: *Context, + fd: std.posix.fd_t, + offset: usize, + list: std.ArrayList(u8), + buffer: []u8, + }; + + fn open_file_task(rt: *Runtime, t: *const Task, provision: *FileProvision) !void { + errdefer { + provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + } + + const fd = t.result.?.fd; + if (fd <= -1) { + provision.context.respond(.{ + .status = .@"Not Found", + .mime = Mime.HTML, + .body = "File Not Found", + }); + return; + } + provision.fd = fd; + + try rt.fs.read( + *FileProvision, + read_file_task, + provision, + fd, + provision.buffer, + 0, + ); } - provision.fd = fd; - - try rt.fs.read(.{ - .fd = fd, - .buffer = provision.buffer, - .offset = 0, - .func = read_file_task, - .ctx = provision, - }); - } - - fn read_file_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); - errdefer { - provision.context.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); + + fn read_file_task(rt: *Runtime, t: *const Task, provision: *FileProvision) !void { + errdefer { + provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + } + + const result: i32 = t.result.?.value; + if (result <= 0) { + // If we are done reading... + try rt.fs.close( + *FileProvision, + close_file_task, + provision, + provision.fd, + ); + return; + } + + const length: usize = @intCast(result); + + try provision.list.appendSlice(provision.buffer[0..length]); + + // TODO: This needs to be a setting you pass in to the router. + // + //if (provision.list.items.len > 1024 * 1024 * 4) { + // provision.context.respond(.{ + // .status = .@"Content Too Large", + // .mime = Mime.HTML, + // .body = "File Too Large", + // }); + // return; + //} + + provision.offset += length; + + try rt.fs.read( + *FileProvision, + read_file_task, + provision, + provision.fd, + provision.buffer, + provision.offset, + ); } - const result: i32 = t.result.?.value; - if (result <= 0) { - // If we are done reading... - try rt.fs.close(.{ - .fd = provision.fd, - .func = close_file_task, - .ctx = provision, + fn close_file_task(_: *Runtime, _: *const Task, provision: *FileProvision) !void { + provision.context.respond(.{ + .status = .OK, + .mime = provision.mime, + .body = provision.list.items[0..], }); - return; } - const length: usize = @intCast(result); - - try provision.list.appendSlice(provision.buffer[0..length]); - - // TODO: This needs to be a setting you pass in to the router. 
- // - //if (provision.list.items.len > 1024 * 1024 * 4) { - // provision.context.respond(.{ - // .status = .@"Content Too Large", - // .mime = Mime.HTML, - // .body = "File Too Large", - // }); - // return; - //} - - provision.offset += length; - - try rt.fs.read(.{ - .fd = provision.fd, - .buffer = provision.buffer, - .offset = provision.offset, - .func = read_file_task, - .ctx = provision, - }); - } - - fn close_file_task(_: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *FileProvision = @ptrCast(@alignCast(ctx.?)); - - provision.context.respond(.{ - .status = .OK, - .mime = provision.mime, - .body = provision.list.items[0..], - }); - } - - pub fn serve_fs_dir(self: *Router, comptime url_path: []const u8, comptime dir_path: []const u8) !void { - assert(!self.locked); - - const route = Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { - const search_path = ctx.captures[0].remaining; - - const file_path = std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - return; - }; + pub fn serve_fs_dir(self: *Self, comptime url_path: []const u8, comptime dir_path: []const u8) !void { + assert(!self.locked); + + const route = Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { + const search_path = ctx.captures[0].remaining; + + const file_path = std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { + ctx.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + return; + }; + + // TODO: Ensure that paths cannot go out of scope and reference data that they shouldn't be allowed to. + // Very important. + + const extension_start = std.mem.lastIndexOfScalar(u8, search_path, '.'); + const mime: Mime = blk: { + if (extension_start) |start| { + break :blk Mime.from_extension(search_path[start..]); + } else { + break :blk Mime.BIN; + } + }; + + const provision = ctx.allocator.create(FileProvision) catch { + ctx.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + return; + }; + + provision.* = .{ + .mime = mime, + .context = ctx, + .fd = -1, + .offset = 0, + .list = std.ArrayList(u8).init(ctx.allocator), + .buffer = ctx.provision.buffer, + }; + + // We also need to support chunked encoding. + // It makes a lot more sense for files atleast. + ctx.runtime.fs.open( + *FileProvision, + open_file_task, + provision, + file_path, + ) catch { + ctx.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + return; + }; + } + }.handler_fn); - // TODO: Ensure that paths cannot go out of scope and reference data that they shouldn't be allowed to. - // Very important. + const url_with_match_all = comptime std.fmt.comptimePrint( + "{s}/%r", + .{std.mem.trimRight(u8, url_path, &.{'/'})}, + ); - const extension_start = std.mem.lastIndexOfScalar(u8, search_path, '.'); - const mime: Mime = blk: { - if (extension_start) |start| { - break :blk Mime.from_extension(search_path[start..]); + try self.serve_route(url_with_match_all, route); + } + + pub fn serve_embedded_file( + self: *Self, + comptime path: []const u8, + comptime mime: ?Mime, + comptime bytes: []const u8, + ) !void { + assert(!self.locked); + const route = Route.init().get(struct { + pub fn handler_fn(ctx: *Context) void { + if (comptime builtin.mode == .Debug) { + // Don't Cache in Debug. 
+ ctx.response.headers.add( + "Cache-Control", + "no-cache", + ) catch unreachable; } else { - break :blk Mime.BIN; + // Cache for 30 days. + ctx.response.headers.add( + "Cache-Control", + comptime std.fmt.comptimePrint("max-age={d}", .{std.time.s_per_day * 30}), + ) catch unreachable; } - }; - - const provision = ctx.allocator.create(FileProvision) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - return; - }; - - provision.* = .{ - .mime = mime, - .context = ctx, - .fd = -1, - .offset = 0, - .list = std.ArrayList(u8).init(ctx.allocator), - .buffer = ctx.provision.buffer, - }; - - // We also need to support chunked encoding. - // It makes a lot more sense for files atleast. - ctx.runtime.fs.open(.{ - .path = file_path, - .func = open_file_task, - .ctx = provision, - }) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - return; - }; - } - }.handler_fn); - - const url_with_match_all = comptime std.fmt.comptimePrint( - "{s}/%r", - .{std.mem.trimRight(u8, url_path, &.{'/'})}, - ); - - try self.serve_route(url_with_match_all, route); - } - - pub fn serve_embedded_file( - self: *Router, - comptime path: []const u8, - comptime mime: ?Mime, - comptime bytes: []const u8, - ) !void { - assert(!self.locked); - const route = Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { - if (comptime builtin.mode == .Debug) { - // Don't Cache in Debug. - ctx.response.headers.add( - "Cache-Control", - "no-cache", - ) catch unreachable; - } else { - // Cache for 30 days. - ctx.response.headers.add( - "Cache-Control", - comptime std.fmt.comptimePrint("max-age={d}", .{std.time.s_per_day * 30}), - ) catch unreachable; - } - // If our static item is greater than 1KB, - // it might be more beneficial to using caching. - if (comptime bytes.len > 1024) { - @setEvalBranchQuota(1_000_000); - const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); - ctx.response.headers.add("ETag", etag[0..]) catch unreachable; - - if (ctx.request.headers.get("If-None-Match")) |match| { - if (std.mem.eql(u8, etag, match)) { - ctx.respond(.{ - .status = .@"Not Modified", - .mime = Mime.HTML, - .body = "", - }); - - return; + // If our static item is greater than 1KB, + // it might be more beneficial to using caching. 
+ if (comptime bytes.len > 1024) { + @setEvalBranchQuota(1_000_000); + const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); + ctx.response.headers.add("ETag", etag[0..]) catch unreachable; + + if (ctx.request.headers.get("If-None-Match")) |match| { + if (std.mem.eql(u8, etag, match)) { + ctx.respond(.{ + .status = .@"Not Modified", + .mime = Mime.HTML, + .body = "", + }); + + return; + } } } - } - ctx.respond(.{ - .status = .OK, - .mime = mime, - .body = bytes, - }); - } - }.handler_fn); + ctx.respond(.{ + .status = .OK, + .mime = mime, + .body = bytes, + }); + } + }.handler_fn); - try self.serve_route(path, route); - } + try self.serve_route(path, route); + } - pub fn serve_route(self: *Router, path: []const u8, route: Route) !void { - assert(!self.locked); - try self.routes.add_route(path, route); - } + pub fn serve_route(self: *Self, path: []const u8, route: Route) !void { + assert(!self.locked); + try self.routes.add_route(path, route); + } - pub fn get_route_from_host(self: Router, host: []const u8, captures: []Capture, queries: *QueryMap) ?FoundRoute { - return self.routes.get_route(host, captures, queries); - } -}; + pub fn get_route_from_host(self: Self, host: []const u8, captures: []Capture, queries: *QueryMap) ?FoundRoute { + return self.routes.get_route(host, captures, queries); + } + }; +} diff --git a/src/http/routing_trie.zig b/src/http/routing_trie.zig index 62ba338..d2a9033 100644 --- a/src/http/routing_trie.zig +++ b/src/http/routing_trie.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/routing_trie"); const CaseStringMap = @import("case_string_map.zig").CaseStringMap; -const Route = @import("lib.zig").Route; +const _Route = @import("route.zig").Route; fn TokenHashMap(comptime V: type) type { return std.HashMap(Token, V, struct { @@ -110,429 +110,433 @@ pub const Capture = union(TokenMatch) { remaining: TokenMatch.remaining.as_type(), }; -pub const FoundRoute = struct { - route: Route, - captures: []Capture, - queries: *QueryMap, -}; - // This RoutingTrie is deleteless. It only can create new routes or update existing ones. 
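// A user-level reference for the capture tokens the trie below matches,
// assuming the Server/Route/Context aliases and the std/http imports from
// the examples above; route, handler, and query names are illustrative.
// "%i" captures a signed integer, "%u" an unsigned one, "%f" a float,
// "%s" a string chunk, and "%r" the remainder of the path, while
// "?key=value" pairs land in ctx.queries.
fn item_handler(ctx: *Context) void {
    const id = ctx.captures[0].signed; // filled by the "%i" token
    const detail = ctx.queries.get("detail") orelse "none"; // from "?detail=..."

    const body = std.fmt.allocPrint(ctx.allocator, "item {d} ({s})", .{ id, detail }) catch {
        ctx.respond(.{
            .status = .@"Internal Server Error",
            .mime = http.Mime.HTML,
            .body = "Out of Memory!",
        });
        return;
    };

    ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body });
}
// Registered with, e.g.:
//   try router.serve_route("/item/%i/description", Route.init().get(item_handler));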
-pub const RoutingTrie = struct { - pub const Node = struct { - allocator: std.mem.Allocator, - token: Token, - route: ?Route = null, - children: TokenHashMap(*Node), - - pub fn init(allocator: std.mem.Allocator, token: Token, route: ?Route) !*Node { - const node_ptr: *Node = try allocator.create(Node); - node_ptr.* = Node{ - .allocator = allocator, - .token = token, - .route = route, - .children = TokenHashMap(*Node).init(allocator), - }; +pub fn RoutingTrie(comptime Server: type) type { + return struct { + const Self = @This(); + const Route = _Route(Server); + + pub const FoundRoute = struct { + route: Route, + captures: []Capture, + queries: *QueryMap, + }; + pub const Node = struct { + allocator: std.mem.Allocator, + token: Token, + route: ?Route = null, + children: TokenHashMap(*Node), + + pub fn init(allocator: std.mem.Allocator, token: Token, route: ?Route) !*Node { + const node_ptr: *Node = try allocator.create(Node); + node_ptr.* = Node{ + .allocator = allocator, + .token = token, + .route = route, + .children = TokenHashMap(*Node).init(allocator), + }; + + return node_ptr; + } - return node_ptr; - } + pub fn deinit(self: *Node) void { + var iter = self.children.valueIterator(); - pub fn deinit(self: *Node) void { - var iter = self.children.valueIterator(); + while (iter.next()) |node| { + node.*.deinit(); + } - while (iter.next()) |node| { - node.*.deinit(); + self.children.deinit(); + self.allocator.destroy(self); } - - self.children.deinit(); - self.allocator.destroy(self); - } - }; - - allocator: std.mem.Allocator, - root: *Node, - - pub fn init(allocator: std.mem.Allocator) !RoutingTrie { - return RoutingTrie{ - .allocator = allocator, - .root = try Node.init( - allocator, - Token{ .fragment = "" }, - Route.init(), - ), }; - } - pub fn deinit(self: *RoutingTrie) void { - self.root.deinit(); - } + allocator: std.mem.Allocator, + root: *Node, - fn print_node(root: *Node) void { - var iter = root.children.iterator(); + pub fn init(allocator: std.mem.Allocator) !Self { + return Self{ + .allocator = allocator, + .root = try Node.init( + allocator, + Token{ .fragment = "" }, + Route.init(), + ), + }; + } - while (iter.next()) |entry| { - const node_ptr = entry.value_ptr.*; - std.io.getStdOut().writer().print( - "Token: {any}\n", - .{node_ptr.token}, - ) catch return; - print_node(entry.value_ptr.*); + pub fn deinit(self: *Self) void { + self.root.deinit(); } - } - fn print(self: *RoutingTrie) void { - print_node(self.root); - } + fn print_node(root: *Node) void { + var iter = root.children.iterator(); - pub fn add_route(self: *RoutingTrie, path: []const u8, route: Route) !void { - // This is where we will parse out the path. 
- var iter = std.mem.tokenizeScalar(u8, path, '/'); - - var current = self.root; - while (iter.next()) |chunk| { - const token: Token = Token.parse_chunk(chunk); - if (current.children.get(token)) |child| { - current = child; - } else { - try current.children.put( - token, - try Node.init(self.allocator, token, null), - ); - - current = current.children.get(token).?; + while (iter.next()) |entry| { + const node_ptr = entry.value_ptr.*; + std.io.getStdOut().writer().print( + "Token: {any}\n", + .{node_ptr.token}, + ) catch return; + print_node(entry.value_ptr.*); } } - current.route = route; - } - - pub fn get_route( - self: RoutingTrie, - path: []const u8, - captures: []Capture, - queries: *QueryMap, - ) ?FoundRoute { - var capture_idx: usize = 0; - - queries.clearRetainingCapacity(); - - const query_pos = std.mem.indexOfScalar(u8, path, '?'); - var iter = std.mem.tokenizeScalar(u8, path[0..(query_pos orelse path.len)], '/'); - var current = self.root; - - slash_loop: while (iter.next()) |chunk| { - const fragment = Token{ .fragment = chunk }; + fn print(self: *Self) void { + print_node(self.root); + } - // If it is the fragment, match it here. - if (current.children.get(fragment)) |child| { - current = child; - continue; - } + pub fn add_route(self: *Self, path: []const u8, route: Route) !void { + // This is where we will parse out the path. + var iter = std.mem.tokenizeScalar(u8, path, '/'); - var matched = false; - for (std.meta.tags(TokenMatch)) |token_type| { - const token = Token{ .match = token_type }; + var current = self.root; + while (iter.next()) |chunk| { + const token: Token = Token.parse_chunk(chunk); if (current.children.get(token)) |child| { - matched = true; - switch (token_type) { - .signed => if (std.fmt.parseInt(i64, chunk, 10)) |value| { - captures[capture_idx] = Capture{ .signed = value }; - } else |_| continue, - .unsigned => if (std.fmt.parseInt(u64, chunk, 10)) |value| { - captures[capture_idx] = Capture{ .unsigned = value }; - } else |_| continue, - .float => if (std.fmt.parseFloat(f64, chunk)) |value| { - captures[capture_idx] = Capture{ .float = value }; - } else |_| continue, - .string => captures[capture_idx] = Capture{ .string = chunk }, - // This ends the matching sequence and claims everything. - // Does not match the query statement! - .remaining => { - const rest = iter.buffer[(iter.index - chunk.len)..]; - captures[capture_idx] = Capture{ .remaining = rest }; - current.route = child.route.?; - capture_idx += 1; - break :slash_loop; - }, - } - current = child; - capture_idx += 1; - - if (capture_idx > captures.len) { - // Should return an error here but for now, - // itll just be a null. - return null; - } + } else { + try current.children.put( + token, + try Node.init(self.allocator, token, null), + ); - break; + current = current.children.get(token).?; } } - // If we failed to match, - // this is an invalid route. 
- if (!matched) { - return null; - } + current.route = route; } - if (query_pos) |pos| { - if (path.len > pos + 1) { - var query_iter = std.mem.tokenizeScalar(u8, path[pos + 1 ..], '&'); - - while (query_iter.next()) |chunk| { - if (queries.count() >= queries.capacity() / 2) { - return null; - } + pub fn get_route( + self: Self, + path: []const u8, + captures: []Capture, + queries: *QueryMap, + ) ?FoundRoute { + var capture_idx: usize = 0; - const field_idx = std.mem.indexOfScalar(u8, chunk, '=') orelse break; - if (chunk.len < field_idx + 1) break; + queries.clearRetainingCapacity(); - const key = chunk[0..field_idx]; - const value = chunk[(field_idx + 1)..]; + const query_pos = std.mem.indexOfScalar(u8, path, '?'); + var iter = std.mem.tokenizeScalar(u8, path[0..(query_pos orelse path.len)], '/'); + var current = self.root; - assert(std.mem.indexOfScalar(u8, key, '=') == null); - assert(std.mem.indexOfScalar(u8, value, '=') == null); + slash_loop: while (iter.next()) |chunk| { + const fragment = Token{ .fragment = chunk }; - queries.putAssumeCapacity(key, value); + // If it is the fragment, match it here. + if (current.children.get(fragment)) |child| { + current = child; + continue; } - } - } - const route = current.route orelse return null; - return FoundRoute{ - .route = route, - .captures = captures[0..capture_idx], - .queries = queries, - }; - } -}; + var matched = false; + for (std.meta.tags(TokenMatch)) |token_type| { + const token = Token{ .match = token_type }; + if (current.children.get(token)) |child| { + matched = true; + switch (token_type) { + .signed => if (std.fmt.parseInt(i64, chunk, 10)) |value| { + captures[capture_idx] = Capture{ .signed = value }; + } else |_| continue, + .unsigned => if (std.fmt.parseInt(u64, chunk, 10)) |value| { + captures[capture_idx] = Capture{ .unsigned = value }; + } else |_| continue, + .float => if (std.fmt.parseFloat(f64, chunk)) |value| { + captures[capture_idx] = Capture{ .float = value }; + } else |_| continue, + .string => captures[capture_idx] = Capture{ .string = chunk }, + // This ends the matching sequence and claims everything. + // Does not match the query statement! + .remaining => { + const rest = iter.buffer[(iter.index - chunk.len)..]; + captures[capture_idx] = Capture{ .remaining = rest }; + current.route = child.route.?; + capture_idx += 1; + break :slash_loop; + }, + } -const testing = std.testing; + current = child; + capture_idx += 1; -test "Chunk Parsing (Fragment)" { - const chunk = "thisIsAFragment"; - const token: Token = Token.parse_chunk(chunk); + if (capture_idx > captures.len) { + // Should return an error here but for now, + // itll just be a null. + return null; + } - switch (token) { - .fragment => |inner| try testing.expectEqualStrings(chunk, inner), - .match => return error.IncorrectTokenParsing, - } -} + break; + } + } -test "Chunk Parsing (Match)" { - const chunks: [5][]const u8 = .{ - "%i", - "%d", - "%u", - "%f", - "%s", - }; + // If we failed to match, + // this is an invalid route. 
+ if (!matched) { + return null; + } + } - const matches = [_]TokenMatch{ - TokenMatch.signed, - TokenMatch.signed, - TokenMatch.unsigned, - TokenMatch.float, - TokenMatch.string, - }; + if (query_pos) |pos| { + if (path.len > pos + 1) { + var query_iter = std.mem.tokenizeScalar(u8, path[pos + 1 ..], '&'); - for (chunks, matches) |chunk, match| { - const token: Token = Token.parse_chunk(chunk); + while (query_iter.next()) |chunk| { + if (queries.count() >= queries.capacity() / 2) { + return null; + } - switch (token) { - .fragment => return error.IncorrectTokenParsing, - .match => |inner| try testing.expectEqual(match, inner), - } - } -} + const field_idx = std.mem.indexOfScalar(u8, chunk, '=') orelse break; + if (chunk.len < field_idx + 1) break; -test "Path Parsing (Mixed)" { - const path = "/item/%i/description"; + const key = chunk[0..field_idx]; + const value = chunk[(field_idx + 1)..]; - const parsed: [3]Token = .{ - .{ .fragment = "item" }, - .{ .match = .signed }, - .{ .fragment = "description" }, - }; + assert(std.mem.indexOfScalar(u8, key, '=') == null); + assert(std.mem.indexOfScalar(u8, value, '=') == null); - var iter = std.mem.tokenizeScalar(u8, path, '/'); + queries.putAssumeCapacity(key, value); + } + } + } - for (parsed) |expected| { - const token = Token.parse_chunk(iter.next().?); - switch (token) { - .fragment => |inner| try testing.expectEqualStrings(expected.fragment, inner), - .match => |inner| try testing.expectEqual(expected.match, inner), + const route = current.route orelse return null; + return FoundRoute{ + .route = route, + .captures = captures[0..capture_idx], + .queries = queries, + }; } - } -} - -test "Custom Hashing" { - var s = TokenHashMap(bool).init(testing.allocator); - { - try s.put(.{ .fragment = "item" }, true); - try s.put(.{ .fragment = "thisisfalse" }, false); - - const state = s.get(.{ .fragment = "item" }).?; - try testing.expect(state); - - const should_be_false = s.get(.{ .fragment = "thisisfalse" }).?; - try testing.expect(!should_be_false); - } - - { - try s.put(.{ .match = .unsigned }, true); - try s.put(.{ .match = .float }, false); - try s.put(.{ .match = .string }, false); - - const state = s.get(.{ .match = .unsigned }).?; - try testing.expect(state); - - const should_be_false = s.get(.{ .match = .float }).?; - try testing.expect(!should_be_false); - - const string_state = s.get(.{ .match = .string }).?; - try testing.expect(!string_state); - } - - defer s.deinit(); -} - -test "Constructing Routing from Path" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%i/description", Route.init()); - try s.add_route("/item/%i/hello", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%s", Route.init()); - try s.add_route("/item/list", Route.init()); - - try testing.expectEqual(1, s.root.children.count()); -} - -test "Routing with Paths" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%i/description", Route.init()); - try s.add_route("/item/%i/hello", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%s", Route.init()); - try s.add_route("/item/list", Route.init()); - - try testing.expectEqual(null, 
s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; - - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].string); - } - - { - const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; - - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - } -} - -test "Routing with Remaining" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%r", Route.init()); - try s.add_route("/item/%i/price/%f", Route.init()); - - try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); - } - { - const captured = s.get_route("/item/name/THIS/IS/A/FILE/SYSTEM/PATH.html", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("THIS/IS/A/FILE/SYSTEM/PATH.html", captured.captures[0].remaining); - } - - { - const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - } - - { - const captured = s.get_route("/item/100/price/283.21", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(100, captured.captures[0].signed); - try testing.expectEqual(283.21, captured.captures[1].float); - } + }; } -test "Routing with Queries" { - var s = try RoutingTrie.init(testing.allocator); - defer s.deinit(); - - var q = QueryMap.init(testing.allocator); - try q.ensureTotalCapacity(8); - defer q.deinit(); - - var captures: [8]Capture = [_]Capture{undefined} ** 8; - - try s.add_route("/item", Route.init()); - try s.add_route("/item/%f/price_float", Route.init()); - try s.add_route("/item/name/%r", Route.init()); - try s.add_route("/item/%i/price/%f", Route.init()); - - try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); - - { - const captured = s.get_route("/item/name/HELLO?name=muki&food=waffle", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); - try testing.expectEqual(2, q.count()); - try testing.expectEqualStrings("muki", q.get("name").?); - try testing.expectEqualStrings("waffle", q.get("food").?); - } - - { - // Purposefully bad format with no keys or values. - const captured = s.get_route("/item/2112.22121/price_float?", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(2112.22121, captured.captures[0].float); - try testing.expectEqual(0, q.count()); - } - - { - // Purposefully bad format with incomplete key/value pair. 
- const captured = s.get_route("/item/100/price/283.21?help", captures[0..], &q).?; - try testing.expectEqual(Route.init(), captured.route); - try testing.expectEqual(100, captured.captures[0].signed); - try testing.expectEqual(283.21, captured.captures[1].float); - try testing.expectEqual(0, q.count()); - } +const testing = std.testing; - { - // Purposefully have too many queries. - const captured = s.get_route("/item/100/price/283.21?a=1&b=2&c=3&d=4&e=5&f=6&g=7&h=8&i=9&j=10&k=11", captures[0..], &q); - try testing.expectEqual(null, captured); - } -} +//test "Chunk Parsing (Fragment)" { +// const chunk = "thisIsAFragment"; +// const token: Token = Token.parse_chunk(chunk); +// +// switch (token) { +// .fragment => |inner| try testing.expectEqualStrings(chunk, inner), +// .match => return error.IncorrectTokenParsing, +// } +//} +// +//test "Chunk Parsing (Match)" { +// const chunks: [5][]const u8 = .{ +// "%i", +// "%d", +// "%u", +// "%f", +// "%s", +// }; +// +// const matches = [_]TokenMatch{ +// TokenMatch.signed, +// TokenMatch.signed, +// TokenMatch.unsigned, +// TokenMatch.float, +// TokenMatch.string, +// }; +// +// for (chunks, matches) |chunk, match| { +// const token: Token = Token.parse_chunk(chunk); +// +// switch (token) { +// .fragment => return error.IncorrectTokenParsing, +// .match => |inner| try testing.expectEqual(match, inner), +// } +// } +//} +// +//test "Path Parsing (Mixed)" { +// const path = "/item/%i/description"; +// +// const parsed: [3]Token = .{ +// .{ .fragment = "item" }, +// .{ .match = .signed }, +// .{ .fragment = "description" }, +// }; +// +// var iter = std.mem.tokenizeScalar(u8, path, '/'); +// +// for (parsed) |expected| { +// const token = Token.parse_chunk(iter.next().?); +// switch (token) { +// .fragment => |inner| try testing.expectEqualStrings(expected.fragment, inner), +// .match => |inner| try testing.expectEqual(expected.match, inner), +// } +// } +//} +// +//test "Custom Hashing" { +// var s = TokenHashMap(bool).init(testing.allocator); +// { +// try s.put(.{ .fragment = "item" }, true); +// try s.put(.{ .fragment = "thisisfalse" }, false); +// +// const state = s.get(.{ .fragment = "item" }).?; +// try testing.expect(state); +// +// const should_be_false = s.get(.{ .fragment = "thisisfalse" }).?; +// try testing.expect(!should_be_false); +// } +// +// { +// try s.put(.{ .match = .unsigned }, true); +// try s.put(.{ .match = .float }, false); +// try s.put(.{ .match = .string }, false); +// +// const state = s.get(.{ .match = .unsigned }).?; +// try testing.expect(state); +// +// const should_be_false = s.get(.{ .match = .float }).?; +// try testing.expect(!should_be_false); +// +// const string_state = s.get(.{ .match = .string }).?; +// try testing.expect(!string_state); +// } +// +// defer s.deinit(); +//} +// +//test "Constructing Routing from Path" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%i/description", Route.init()); +// try s.add_route("/item/%i/hello", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%s", Route.init()); +// try s.add_route("/item/list", Route.init()); +// +// try testing.expectEqual(1, s.root.children.count()); +//} +// +//test "Routing with Paths" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var 
captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%i/description", Route.init()); +// try s.add_route("/item/%i/hello", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%s", Route.init()); +// try s.add_route("/item/list", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; +// +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].string); +// } +// +// { +// const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; +// +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// } +//} +// +//test "Routing with Remaining" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%r", Route.init()); +// try s.add_route("/item/%i/price/%f", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); +// } +// { +// const captured = s.get_route("/item/name/THIS/IS/A/FILE/SYSTEM/PATH.html", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("THIS/IS/A/FILE/SYSTEM/PATH.html", captured.captures[0].remaining); +// } +// +// { +// const captured = s.get_route("/item/2112.22121/price_float", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// } +// +// { +// const captured = s.get_route("/item/100/price/283.21", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(100, captured.captures[0].signed); +// try testing.expectEqual(283.21, captured.captures[1].float); +// } +//} +// +//test "Routing with Queries" { +// var s = try RoutingTrie.init(testing.allocator); +// defer s.deinit(); +// +// var q = QueryMap.init(testing.allocator); +// try q.ensureTotalCapacity(8); +// defer q.deinit(); +// +// var captures: [8]Capture = [_]Capture{undefined} ** 8; +// +// try s.add_route("/item", Route.init()); +// try s.add_route("/item/%f/price_float", Route.init()); +// try s.add_route("/item/name/%r", Route.init()); +// try s.add_route("/item/%i/price/%f", Route.init()); +// +// try testing.expectEqual(null, s.get_route("/item/name", captures[0..], &q)); +// +// { +// const captured = s.get_route("/item/name/HELLO?name=muki&food=waffle", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqualStrings("HELLO", captured.captures[0].remaining); +// try testing.expectEqual(2, q.count()); +// try testing.expectEqualStrings("muki", q.get("name").?); +// try testing.expectEqualStrings("waffle", q.get("food").?); +// } +// +// { +// // 
Purposefully bad format with no keys or values. +// const captured = s.get_route("/item/2112.22121/price_float?", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(2112.22121, captured.captures[0].float); +// try testing.expectEqual(0, q.count()); +// } +// +// { +// // Purposefully bad format with incomplete key/value pair. +// const captured = s.get_route("/item/100/price/283.21?help", captures[0..], &q).?; +// try testing.expectEqual(Route.init(), captured.route); +// try testing.expectEqual(100, captured.captures[0].signed); +// try testing.expectEqual(283.21, captured.captures[1].float); +// try testing.expectEqual(0, q.count()); +// } +// +// { +// // Purposefully have too many queries. +// const captured = s.get_route("/item/100/price/283.21?a=1&b=2&c=3&d=4&e=5&f=6&g=7&h=8&i=9&j=10&k=11", captures[0..], &q); +// try testing.expectEqual(null, captured); +// } +//} diff --git a/src/http/server.zig b/src/http/server.zig index 78b3088..7dc3cd5 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -9,10 +9,17 @@ const TLSFileOptions = @import("../tls/lib.zig").TLSFileOptions; const TLSContext = @import("../tls/lib.zig").TLSContext; const TLS = @import("../tls/lib.zig").TLS; +const _Context = @import("context.zig").Context; +const Request = @import("request.zig").Request; +const Response = @import("response.zig").Response; +const Capture = @import("routing_trie.zig").Capture; +const QueryMap = @import("routing_trie.zig").QueryMap; +const ResponseSetOptions = Response.ResponseSetOptions; + const Provision = @import("provision.zig").Provision; const Mime = @import("mime.zig").Mime; -const Router = @import("router.zig").Router; -const Context = @import("context.zig").Context; +const _Router = @import("router.zig").Router; +const _Route = @import("route.zig").Route; const HTTPError = @import("lib.zig").HTTPError; const Pool = @import("tardy").Pool; @@ -44,82 +51,6 @@ pub const Security = union(enum) { }, }; -/// These are various general configuration -/// options that are important for the actual framework. -/// -/// This includes various different options and limits -/// for interacting with the underlying network. -pub const ServerConfig = struct { - /// The allocator that server will use. - allocator: std.mem.Allocator, - /// HTTP Request Router. - router: *Router, - /// Threading Model to use. - /// - /// Default: .auto - threading: Threading = .auto, - /// Kernel Backlog Value. - size_backlog: u31 = 512, - /// Number of Maximum Concurrent Connections. - /// - /// This is applied PER thread if using multi-threading. - /// zzz will drop/close any connections greater - /// than this. - /// - /// You want to tune this to your expected number - /// of maximum connections. - /// - /// Default: 1024 - size_connections_max: u16 = 1024, - /// Maximum number of completions we can reap - /// with a single call of reap(). - /// - /// Default: 256 - size_completions_reap_max: u16 = 256, - /// Amount of allocated memory retained - /// after an arena is cleared. - /// - /// A higher value will increase memory usage but - /// should make allocators faster.Tardy - /// - /// A lower value will reduce memory usage but - /// will make allocators slower. - /// - /// Default: 1KB - size_connection_arena_retain: u32 = 1024, - /// Size of the buffer (in bytes) used for - /// interacting with the socket. - /// - /// Default: 4 KB. - size_socket_buffer: u32 = 1024 * 4, - /// Maximum size (in bytes) of the Recv buffer. 
- /// This is mainly a concern when you are reading in - /// large requests before responding. - /// - /// Default: 2MB. - size_recv_buffer_max: u32 = 1024 * 1024 * 2, - /// Maximum number of Headers in a Request/Response - /// - /// Default: 32 - num_header_max: u32 = 32, - /// Maximum number of Captures in a Route - /// - /// Default: 8 - num_captures_max: u32 = 8, - /// Maximum number of Queries in a URL - /// - /// Default: 8 - num_queries_max: u32 = 8, - /// Maximum size (in bytes) of the Request. - /// - /// Default: 2MB. - size_request_max: u32 = 1024 * 1024 * 2, - /// Maximum size (in bytes) of the Request URI. - /// - /// Default: 2KB. - size_request_uri_max: u32 = 1024 * 2, -}; - /// Uses the current p.response to generate and queue up the sending /// of a response. This is used when we already know what we want to send. /// @@ -138,78 +69,6 @@ pub inline fn raw_respond(p: *Provision) !RecvStatus { return .{ .send = pseudo }; } -fn route_and_respond(runtime: *Runtime, trigger: TaskFn, p: *Provision, router: *const Router) !RecvStatus { - route: { - const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); - if (found) |f| { - const handler = f.route.get_handler(p.request.method); - - if (handler) |func| { - const context: *Context = try p.arena.allocator().create(Context); - context.* = Context.init( - p.arena.allocator(), - trigger, - runtime, - p, - &p.request, - &p.response, - p.request.uri, - f.captures, - f.queries, - ); - - @call(.auto, func, .{context}); - return .spawned; - } else { - // If we match the route but not the method. - p.response.set(.{ - .status = .@"Method Not Allowed", - .mime = Mime.HTML, - .body = "405 Method Not Allowed", - }); - - // We also need to add to Allow header. - // This uses the connection's arena to allocate 64 bytes. - const allowed = f.route.get_allowed(p.arena.allocator()) catch { - p.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - - break :route; - }; - - p.response.headers.add("Allow", allowed) catch { - p.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - - break :route; - }; - - break :route; - } - } - - // Didn't match any route. - p.response.set(.{ - .status = .@"Not Found", - .mime = Mime.HTML, - .body = "404 Not Found", - }); - break :route; - } - - if (p.response.status == .Kill) { - return .kill; - } - - return try raw_respond(p); -} - pub fn Server( comptime security: Security, comptime async_type: AsyncIOType, @@ -220,12 +79,162 @@ pub fn Server( return struct { const Self = @This(); + pub const Context = _Context(Self); + pub const Router = _Router(Self); + pub const Route = _Route(Self); allocator: std.mem.Allocator, tardy: Tardy, config: ServerConfig, addr: std.net.Address, tls_ctx: TLSContextType, + fn route_and_respond(runtime: *Runtime, p: *Provision, router: *const Router) !RecvStatus { + route: { + const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); + if (found) |f| { + const handler = f.route.get_handler(p.request.method); + + if (handler) |func| { + const context: *Context = try p.arena.allocator().create(Context); + context.* = .{ + .allocator = p.arena.allocator(), + .runtime = runtime, + .request = &p.request, + .response = &p.response, + .path = p.request.uri, + .captures = f.captures, + .queries = f.queries, + .provision = p, + }; + + @call(.auto, func, .{context}); + return .spawned; + } else { + // If we match the route but not the method. 
+ p.response.set(.{ + .status = .@"Method Not Allowed", + .mime = Mime.HTML, + .body = "405 Method Not Allowed", + }); + + // We also need to add to Allow header. + // This uses the connection's arena to allocate 64 bytes. + const allowed = f.route.get_allowed(p.arena.allocator()) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + p.response.headers.add("Allow", allowed) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + break :route; + } + } + + // Didn't match any route. + p.response.set(.{ + .status = .@"Not Found", + .mime = Mime.HTML, + .body = "404 Not Found", + }); + break :route; + } + + if (p.response.status == .Kill) { + return .kill; + } + + return try raw_respond(p); + } + + /// These are various general configuration + /// options that are important for the actual framework. + /// + /// This includes various different options and limits + /// for interacting with the underlying network. + pub const ServerConfig = struct { + /// The allocator that server will use. + allocator: std.mem.Allocator, + /// HTTP Request Router. + router: *Router, + /// Threading Model to use. + /// + /// Default: .auto + threading: Threading = .auto, + /// Kernel Backlog Value. + size_backlog: u31 = 512, + /// Number of Maximum Concurrent Connections. + /// + /// This is applied PER thread if using multi-threading. + /// zzz will drop/close any connections greater + /// than this. + /// + /// You want to tune this to your expected number + /// of maximum connections. + /// + /// Default: 1024 + size_connections_max: u16 = 1024, + /// Maximum number of completions we can reap + /// with a single call of reap(). + /// + /// Default: 256 + size_completions_reap_max: u16 = 256, + /// Amount of allocated memory retained + /// after an arena is cleared. + /// + /// A higher value will increase memory usage but + /// should make allocators faster.Tardy + /// + /// A lower value will reduce memory usage but + /// will make allocators slower. + /// + /// Default: 1KB + size_connection_arena_retain: u32 = 1024, + /// Size of the buffer (in bytes) used for + /// interacting with the socket. + /// + /// Default: 4 KB. + size_socket_buffer: u32 = 1024 * 4, + /// Maximum size (in bytes) of the Recv buffer. + /// This is mainly a concern when you are reading in + /// large requests before responding. + /// + /// Default: 2MB. + size_recv_buffer_max: u32 = 1024 * 1024 * 2, + /// Maximum number of Headers in a Request/Response + /// + /// Default: 32 + num_header_max: u32 = 32, + /// Maximum number of Captures in a Route + /// + /// Default: 8 + num_captures_max: u32 = 8, + /// Maximum number of Queries in a URL + /// + /// Default: 8 + num_queries_max: u32 = 8, + /// Maximum size (in bytes) of the Request. + /// + /// Default: 2MB. + size_request_max: u32 = 1024 * 1024 * 2, + /// Maximum size (in bytes) of the Request URI. + /// + /// Default: 2KB. 
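// A tuning sketch, assuming the std/Server/Router aliases from the earlier
// examples; the helper name is hypothetical and the values mirror the
// low-memory example rather than being recommendations. Any field left out
// keeps the default documented in the config fields around this point.
fn make_small_server(allocator: std.mem.Allocator, router: *Router) Server {
    return Server.init(.{
        .router = router,
        .allocator = allocator,
        .threading = .single,
        .size_socket_buffer = 512,
        .num_header_max = 32,
        .num_captures_max = 0,
        .size_request_max = 2048,
        .size_request_uri_max = 256,
    });
}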
+ size_request_uri_max: u32 = 1024 * 2, + }; + pub fn init(config: ServerConfig) Self { const tls_ctx = switch (comptime security) { .tls => |inner| TLSContext.init(.{ @@ -319,8 +328,7 @@ pub fn Server( }; } - fn close_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + fn close_task(rt: *Runtime, _: *const Task, provision: *Provision) !void { assert(provision.job == .close); const server_socket = rt.storage.get("server_socket", std.posix.socket_t); const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); @@ -354,14 +362,16 @@ pub fn Server( const accept_queued = rt.storage.get_ptr("accept_queued", bool); if (!accept_queued.*) { accept_queued.* = true; - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); + try rt.net.accept( + std.posix.socket_t, + accept_task, + server_socket, + server_socket, + ); } } - fn accept_task(rt: *Runtime, t: *const Task, _: ?*anyopaque) !void { + fn accept_task(rt: *Runtime, t: *const Task, server_socket: std.posix.socket_t) !void { const child_socket = t.result.?.socket; const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); @@ -370,11 +380,12 @@ pub fn Server( if (rt.scheduler.tasks.clean() >= 2) { accept_queued.* = true; - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - try rt.net.accept(.{ - .socket = server_socket, - .func = accept_task, - }); + try rt.net.accept( + std.posix.socket_t, + accept_task, + server_socket, + server_socket, + ); } if (!Cross.socket.is_valid(child_socket)) { @@ -413,81 +424,71 @@ pub fn Server( tls_ptr.* = tls_ctx.create(child_socket) catch |e| { log.err("{d} - tls creation failed={any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSCreationFailed; }; const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSStartHandshakeFailed; }; provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = recv_buf, - .func = handshake_task, - .ctx = borrowed.item, - }); + try rt.net.recv( + *Provision, + handshake_task, + borrowed.item, + child_socket, + recv_buf, + ); }, .plain => { provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = child_socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = borrowed.item, - }); + try rt.net.recv( + *Provision, + recv_task, + provision, + child_socket, + provision.buffer, + ); }, } } /// This is the task you MUST trigger if the `recv_fn` returns `.spawned`. 
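// The callback convention used by trigger_task, recv_task, and the other
// tasks in this file: tardy callbacks now take a typed context pointer
// instead of a ?*anyopaque that had to be cast, and submissions pass the
// pointer type, the callback, and the context up front. A sketch with a
// hypothetical task name; the Provision fields and close_task used here
// are the ones defined above.
fn echo_recv_task(rt: *Runtime, t: *const Task, provision: *Provision) !void {
    const length: i32 = t.result.?.value;
    if (length <= 0) {
        // Peer closed the connection; hand off to the close path.
        provision.job = .close;
        try rt.net.close(*Provision, close_task, provision, provision.socket);
        return;
    }

    // Queue another receive using the typed submission API.
    try rt.net.recv(*Provision, echo_recv_task, provision, provision.socket, provision.buffer);
}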
- fn trigger_task(rt: *Runtime, _: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); - + pub fn trigger_task(rt: *Runtime, _: *const Task, provision: *Provision) !void { switch (provision.job) { else => unreachable, .recv => { - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); + try rt.net.recv( + *Provision, + recv_task, + provision, + provision.socket, + provision.buffer, + ); }, .send => |*send_job| { const config = rt.storage.get_const_ptr("config", ServerConfig); const plain_buffer = send_job.slice.get(0, config.size_socket_buffer); + if (provision.response.status.? == .Kill) { + rt.stop(); + return; + } + switch (comptime security) { .tls => |_| { - const tls_slice: []TLSType = @as( - [*]TLSType, - @ptrCast(@alignCast(rt.storage.get("tls_slice").?)), - )[0..config.size_connections_max]; - + const tls_slice = rt.storage.get("tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .fd = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSEncryptFailed; }; @@ -499,30 +500,31 @@ pub fn Server( }, }; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = encrypted_buffer, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + encrypted_buffer, + ); }, .plain => { send_job.security = .plain; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + provision.buffer, + ); }, } }, } } - fn recv_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + fn recv_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { assert(provision.job == .recv); const length: i32 = t.result.?.value; @@ -533,11 +535,7 @@ pub fn Server( // If the socket is closed. if (length <= 0) { provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return; } @@ -551,18 +549,13 @@ pub fn Server( switch (comptime security) { .tls => |_| { const tls_slice = rt.storage.get("tls_slice", []TLSType); - const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSDecryptFailed; }; }, @@ -670,7 +663,7 @@ pub fn Server( } if (!provision.request.expect_body()) { - break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, config.router) catch unreachable; } // Everything after here is a Request that is expecting a body. 
@@ -697,7 +690,7 @@ pub fn Server( log.debug("{d} - got whole body with header", .{provision.index}); const body_end = header_end + difference; provision.request.set_body(provision.recv_buffer.items[header_end..body_end]); - break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, config.router) catch unreachable; } else { // Partial Body log.debug("{d} - got partial body with header", .{provision.index}); @@ -710,7 +703,7 @@ pub fn Server( log.debug("{d} - got body of length 0", .{provision.index}); // Body of Length 0. provision.request.set_body(""); - break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, config.router) catch unreachable; } else { // Got only header. log.debug("{d} - got all header aka no body", .{provision.index}); @@ -761,7 +754,7 @@ pub fn Server( if (job.count >= request_length) { provision.request.set_body(provision.recv_buffer.items[header_end..request_length]); - break :status route_and_respond(rt, trigger_task, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, config.router) catch unreachable; } else { break :status .recv; } @@ -776,12 +769,13 @@ pub fn Server( return error.Killed; }, .recv => { - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); + try rt.net.recv( + *Provision, + recv_task, + provision, + provision.socket, + provision.buffer, + ); }, .send => |*pslice| { const plain_buffer = pslice.get(0, config.size_socket_buffer); @@ -789,18 +783,13 @@ pub fn Server( switch (comptime security) { .tls => |_| { const tls_slice = rt.storage.get("tls_slice", []TLSType); - const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSEncryptFailed; }; @@ -817,12 +806,13 @@ pub fn Server( }, }; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = encrypted_buffer, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + provision.buffer, + ); }, .plain => { provision.job = .{ @@ -833,22 +823,21 @@ pub fn Server( }, }; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + provision.buffer, + ); }, } }, } } - fn handshake_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - log.debug("Handshake Task", .{}); + fn handshake_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { assert(security == .tls); - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); const length: i32 = t.result.?.value; if (comptime security == .tls) { @@ -865,128 +854,68 @@ pub fn Server( if (length <= 0) { log.debug("handshake connection closed", .{}); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSHandshakeClosed; } if 
(handshake_job.count >= 50) { log.debug("handshake taken too many cycles", .{}); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSHandshakeTooManyCycles; } const hs_length: usize = @intCast(length); - switch (handshake_job.state) { - .recv => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .recv = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on recv failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeRecvFailed; - }; + const hstate = switch (handshake_job.state) { + .recv => tls_ptr.*.?.continue_handshake(.{ .recv = @intCast(hs_length) }), + .send => tls_ptr.*.?.continue_handshake(.{ .send = @intCast(hs_length) }), + } catch |e| { + log.err("{d} - tls handshake failed={any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(*Provision, close_task, provision, provision.socket); + return error.TLSHandshakeRecvFailed; + }; - switch (hstate) { - .recv => |buf| { - log.debug("requeing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("queueing send in handshake", .{}); - handshake_job.state = .send; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } + switch (hstate) { + .recv => |buf| { + log.debug("queueing recv in handshake", .{}); + handshake_job.state = .recv; + try rt.net.recv( + *Provision, + handshake_task, + provision, + provision.socket, + buf, + ); }, - .send => { - // on recv, we want to read from socket and feed into tls engien - const hstate = tls_ptr.*.?.continue_handshake( - .{ .send = @intCast(hs_length) }, - ) catch |e| { - log.err("{d} - tls handshake on send failed={any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); - return error.TLSHandshakeSendFailed; - }; - - switch (hstate) { - .recv => |buf| { - handshake_job.state = .recv; - log.debug("queuing recv in handshake", .{}); - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .send => |buf| { - log.debug("requeing send in handshake", .{}); - try rt.net.send(.{ - .socket = provision.socket, - .buffer = buf, - .func = handshake_task, - .ctx = provision, - }); - }, - .complete => { - log.debug("handshake complete", .{}); - provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); - }, - } + .send => |buf| { + log.debug("queueing send in handshake", .{}); + handshake_job.state = .send; + try rt.net.send( + *Provision, + handshake_task, + provision, + provision.socket, + buf, + ); + }, + .complete => { + log.debug("handshake complete", .{}); + provision.job = .{ .recv = .{ 
.count = 0 } }; + try rt.net.recv( + *Provision, + recv_task, + provision, + provision.socket, + provision.buffer, + ); }, } - } else unreachable; + } } - fn send_task(rt: *Runtime, t: *const Task, ctx: ?*anyopaque) !void { - const provision: *Provision = @ptrCast(@alignCast(ctx.?)); + fn send_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { assert(provision.job == .send); const length: i32 = t.result.?.value; @@ -995,11 +924,7 @@ pub fn Server( // If the socket is closed. if (length <= 0) { provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return; } @@ -1028,12 +953,13 @@ pub fn Server( provision.recv_buffer.clearRetainingCapacity(); provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); + try rt.net.recv( + *Provision, + recv_task, + provision, + provision.socket, + provision.buffer, + ); } else { // Queue a new chunk up for sending. log.debug( @@ -1054,23 +980,20 @@ pub fn Server( const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(.{ - .socket = provision.socket, - .func = close_task, - .ctx = provision, - }); + try rt.net.close(*Provision, close_task, provision, provision.socket); return error.TLSEncryptFailed; }; job_tls.encrypted = encrypted; job_tls.encrypted_count = 0; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = job_tls.encrypted, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + job_tls.encrypted, + ); } } else { log.debug( @@ -1079,12 +1002,13 @@ pub fn Server( ); const remainder = job_tls.encrypted[job_tls.encrypted_count..]; - try rt.net.send(.{ - .socket = provision.socket, - .buffer = remainder, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + send_task, + provision, + provision.socket, + remainder, + ); } }, .plain => { @@ -1099,12 +1023,13 @@ pub fn Server( provision.recv_buffer.clearRetainingCapacity(); provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv(.{ - .socket = provision.socket, - .buffer = provision.buffer, - .func = recv_task, - .ctx = provision, - }); + try rt.net.recv( + *Provision, + recv_task, + provision, + provision.socket, + provision.buffer, + ); } else { log.debug( "{d} - sending next chunk starting at index {d}", @@ -1121,12 +1046,13 @@ pub fn Server( plain_buffer.len + send_job.count, }); - try rt.net.send(.{ - .socket = provision.socket, - .buffer = plain_buffer, - .func = send_task, - .ctx = provision, - }); + try rt.net.send( + *Provision, + recv_task, + provision, + provision.socket, + plain_buffer, + ); } }, } @@ -1166,16 +1092,18 @@ pub fn Server( // since slices are fat pointers... 
try rt.storage.store_alloc("tls_slice", tls_slice); - try rt.storage.store_ptr("tls_ctx", zzz.tls_ctx); + try rt.storage.store_ptr("tls_ctx", &zzz.tls_ctx); } try rt.storage.store_alloc("server_socket", socket); try rt.storage.store_alloc("accept_queued", true); - try rt.net.accept(.{ - .socket = socket, - .func = accept_task, - }); + try rt.net.accept( + std.posix.socket_t, + accept_task, + socket, + socket, + ); } }.rt_start, self, From 39e6eacba95e996b7a4d7ef69cef32fd8b86741f Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 27 Oct 2024 01:20:33 -0700 Subject: [PATCH 05/17] feat(response): optional content-length --- src/http/response.zig | 91 +++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/src/http/response.zig b/src/http/response.zig index b08c96e..19f4349 100644 --- a/src/http/response.zig +++ b/src/http/response.zig @@ -79,30 +79,29 @@ pub const Response = struct { } } - pub fn headers_into_buffer(self: *Response, buffer: []u8, content_length: u32) ![]u8 { - var stream = std.io.fixedBufferStream(buffer); - try self.write_headers(stream.writer(), content_length); - return stream.getWritten(); - } + pub fn headers_into_buffer(self: *Response, buffer: []u8, content_length: ?u32) ![]u8 { + var index: usize = 0; - fn write_headers(self: *Response, writer: anytype, content_length: u32) !void { // Status Line - try writer.writeAll("HTTP/1.1 "); + std.mem.copyForwards(u8, buffer[index..], "HTTP/1.1 "); + index += 9; if (self.status) |status| { - try std.fmt.formatInt(@intFromEnum(status), 10, .lower, .{}, writer); - try writer.writeAll(" "); - try writer.writeAll(@tagName(status)); + const status_code = @intFromEnum(status); + const code = try std.fmt.bufPrint(buffer[index..], "{d} ", .{status_code}); + index += code.len; + const status_name = @tagName(status); + std.mem.copyForwards(u8, buffer[index..], status_name); + index += status_name.len; } else { return error.MissingStatus; } - try writer.writeAll("\r\n"); - - // Standard Headers. + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; - // Cache the Date. - // Omits the Date header on any platform that doesn't support timestamp(). 
+ // Standard Headers + // Cache the Date const ts = std.time.timestamp(); if (ts != 0) { if (self.cached_date.ts != ts) { @@ -114,40 +113,56 @@ pub const Response = struct { .index = buf.len, }; } - - assert(self.cached_date.index < self.cached_date.buffer.len); - try writer.writeAll("Date: "); - try writer.writeAll(self.cached_date.buffer[0..self.cached_date.index]); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], "Date: "); + index += 6; + std.mem.copyForwards(u8, buffer[index..], self.cached_date.buffer[0..self.cached_date.index]); + index += self.cached_date.index; + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; } - try writer.writeAll("Server: zzz\r\n"); - try writer.writeAll("Connection: keep-alive\r\n"); + std.mem.copyForwards(u8, buffer[index..], "Server: zzz\r\nConnection: keep-alive\r\n"); + index += 37; // Headers var iter = self.headers.map.iterator(); while (iter.next()) |entry| { - try writer.writeAll(entry.key_ptr.*); - try writer.writeAll(": "); - try writer.writeAll(entry.value_ptr.*); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], entry.key_ptr.*); + index += entry.key_ptr.len; + std.mem.copyForwards(u8, buffer[index..], ": "); + index += 2; + std.mem.copyForwards(u8, buffer[index..], entry.value_ptr.*); + index += entry.value_ptr.len; + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; } - // If we have an associated MIME type. + // Content-Type + std.mem.copyForwards(u8, buffer[index..], "Content-Type: "); + index += 14; if (self.mime) |m| { - try writer.writeAll("Content-Type: "); - try writer.writeAll(m.content_type); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], m.content_type); + index += m.content_type.len; } else { - // By default, we should just send as an octet-stream for safety. 
- try writer.writeAll("Content-Type: "); - try writer.writeAll(Mime.BIN.content_type); - try writer.writeAll("\r\n"); + std.mem.copyForwards(u8, buffer[index..], Mime.BIN.content_type); + index += Mime.BIN.content_type.len; } + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; + + // Content-Length + if (content_length) |length| { + std.mem.copyForwards(u8, buffer[index..], "Content-Length: "); + index += 16; + const length_str = try std.fmt.bufPrint(buffer[index..], "{d}", .{length}); + index += length_str.len; + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; + } + + std.mem.copyForwards(u8, buffer[index..], "\r\n"); + index += 2; - try writer.writeAll("Content-Length: "); - try std.fmt.formatInt(content_length, 10, .lower, .{}, writer); - try writer.writeAll("\r\n"); - try writer.writeAll("\r\n"); + return buffer[0..index]; } }; From fea9bbbbab8b207a750f4369cf05f69d7b06e2ac Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 27 Oct 2024 02:10:17 -0700 Subject: [PATCH 06/17] fix(response): send correct buffer --- src/http/server.zig | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/src/http/server.zig b/src/http/server.zig index 7dc3cd5..d26c9fd 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -461,15 +461,6 @@ pub fn Server( pub fn trigger_task(rt: *Runtime, _: *const Task, provision: *Provision) !void { switch (provision.job) { else => unreachable, - .recv => { - try rt.net.recv( - *Provision, - recv_task, - provision, - provision.socket, - provision.buffer, - ); - }, .send => |*send_job| { const config = rt.storage.get_const_ptr("config", ServerConfig); const plain_buffer = send_job.slice.get(0, config.size_socket_buffer); @@ -516,7 +507,7 @@ pub fn Server( send_task, provision, provision.socket, - provision.buffer, + plain_buffer, ); }, } From 7ef39f9770e699fd6da8d557ff26fe9317e3f0f3 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 27 Oct 2024 12:42:17 -0700 Subject: [PATCH 07/17] feat(response): directly send without trigger --- src/http/context.zig | 15 +++++- src/http/server.zig | 109 ++++++++++++++++++++----------------------- 2 files changed, 63 insertions(+), 61 deletions(-) diff --git a/src/http/context.zig b/src/http/context.zig index 9bd3c5d..f10470b 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -48,10 +48,21 @@ pub fn Context(comptime Server: type) type { }, }; - self.runtime.spawn( + const body = options.body orelse ""; + + const first_chunk = Server.prepare_send( + self.runtime, + self.provision, + body, + @intCast(body.len), + ) catch unreachable; + + self.runtime.net.send( *Provision, - Server.trigger_task, + Server.send_task, self.provision, + self.provision.socket, + first_chunk, ) catch unreachable; } }; diff --git a/src/http/server.zig b/src/http/server.zig index d26c9fd..99cee47 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -457,64 +457,6 @@ pub fn Server( } } - /// This is the task you MUST trigger if the `recv_fn` returns `.spawned`. - pub fn trigger_task(rt: *Runtime, _: *const Task, provision: *Provision) !void { - switch (provision.job) { - else => unreachable, - .send => |*send_job| { - const config = rt.storage.get_const_ptr("config", ServerConfig); - const plain_buffer = send_job.slice.get(0, config.size_socket_buffer); - - if (provision.response.status.? 
== .Kill) { - rt.stop(); - return; - } - - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - const tls_ptr: *TLSType = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); - return error.TLSEncryptFailed; - }; - - send_job.count = plain_buffer.len; - send_job.security = .{ - .tls = .{ - .encrypted = encrypted_buffer, - .encrypted_count = 0, - }, - }; - - try rt.net.send( - *Provision, - send_task, - provision, - provision.socket, - encrypted_buffer, - ); - }, - .plain => { - send_job.security = .plain; - - try rt.net.send( - *Provision, - send_task, - provision, - provision.socket, - plain_buffer, - ); - }, - } - }, - } - } - fn recv_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { assert(provision.job == .recv); const length: i32 = t.result.?.value; @@ -906,7 +848,56 @@ pub fn Server( } } - fn send_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { + /// Prepares the provision send_job and returns the first send chunk + pub fn prepare_send(rt: *Runtime, provision: *Provision, body: []const u8, content_length: ?u32) ![]const u8 { + const config = rt.storage.get_const_ptr("config", ServerConfig); + const headers = try provision.response.headers_into_buffer(provision.buffer, content_length); + var pslice = Pseudoslice.init(headers, body, provision.buffer); + const plain_buffer = pslice.get(0, config.size_socket_buffer); + + switch (comptime security) { + .tls => { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(*Provision, close_task, provision, provision.socket); + return error.TLSEncryptFailed; + }; + + provision.job = .{ + .send = .{ + .slice = pslice, + .count = @intCast(plain_buffer.len), + .security = .{ + .tls = .{ + .encrypted = encrypted_buffer, + .encrypted_count = 0, + }, + }, + }, + }; + + return encrypted_buffer; + }, + .plain => { + provision.job = .{ + .send = .{ + .slice = pslice, + .count = 0, + .security = .plain, + }, + }; + + return plain_buffer; + }, + } + } + + pub fn send_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { assert(provision.job == .send); const length: i32 = t.result.?.value; From 6ad97663596efe163feb9cd75465a76c276eb59b Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 27 Oct 2024 13:36:51 -0700 Subject: [PATCH 08/17] feat(http): SSE Responses --- build.zig | 19 +- examples/http/basic/main.zig | 47 ++- examples/http/benchmark/main.zig | 47 ++- examples/http/sse/index.html | 85 ++++ examples/http/sse/main.zig | 141 +++++++ src/core/job.zig | 14 +- src/core/pseudoslice.zig | 2 +- src/http/context.zig | 89 ++++- src/http/router.zig | 4 +- src/http/server.zig | 661 ++++++++++++++----------------- src/http/sse.zig | 92 +++++ 11 files changed, 787 insertions(+), 414 deletions(-) create mode 100644 examples/http/sse/index.html create mode 100644 examples/http/sse/main.zig create mode 100644 src/http/sse.zig diff --git a/build.zig b/build.zig index 7d51ba0..80c0a2c 100644 --- a/build.zig +++ b/build.zig @@ -28,14 +28,15 @@ pub fn 
build(b: *std.Build) void { zzz.linkLibrary(bearssl); - add_example(b, "basic", .http, false, target, optimize, zzz); - add_example(b, "custom", .http, false, target, optimize, zzz); - add_example(b, "tls", .http, true, target, optimize, zzz); - add_example(b, "minram", .http, false, target, optimize, zzz); - add_example(b, "fs", .http, false, target, optimize, zzz); - add_example(b, "multithread", .http, false, target, optimize, zzz); - add_example(b, "benchmark", .http, false, target, optimize, zzz); - add_example(b, "valgrind", .http, true, target, optimize, zzz); + add_example(b, "basic", .http, false, target, optimize, zzz, tardy); + add_example(b, "sse", .http, false, target, optimize, zzz, tardy); + add_example(b, "custom", .http, false, target, optimize, zzz, tardy); + add_example(b, "tls", .http, true, target, optimize, zzz, tardy); + add_example(b, "minram", .http, false, target, optimize, zzz, tardy); + add_example(b, "fs", .http, false, target, optimize, zzz, tardy); + add_example(b, "multithread", .http, false, target, optimize, zzz, tardy); + add_example(b, "benchmark", .http, false, target, optimize, zzz, tardy); + add_example(b, "valgrind", .http, true, target, optimize, zzz, tardy); const tests = b.addTest(.{ .name = "tests", @@ -62,6 +63,7 @@ fn add_example( target: std.Build.ResolvedTarget, optimize: std.builtin.Mode, zzz_module: *std.Build.Module, + tardy_module: *std.Build.Module, ) void { const example = b.addExecutable(.{ .name = b.fmt("{s}_{s}", .{ @tagName(protocol), name }), @@ -76,6 +78,7 @@ fn add_example( } example.root_module.addImport("zzz", zzz_module); + example.root_module.addImport("tardy", tardy_module); const install_artifact = b.addInstallArtifact(example, .{}); const run_cmd = b.addRunArtifact(example); diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index 2e25101..a9fc435 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -1,9 +1,14 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/basic"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/basic"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; @@ -11,11 +16,21 @@ const Route = Server.Route; pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; + const max_conn = 512; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = gpa.allocator(); defer _ = gpa.deinit(); + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + var router = Router.init(allocator); defer router.deinit(); @@ -38,13 +53,23 @@ pub fn main() !void { } }.handler_fn)); - var server = http.Server(.plain, .auto).init(.{ - .router = &router, - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ + .allocator = alloc, + .size_connections_max = max_conn, + }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) 
void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index 6e289d4..a66ef47 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -1,9 +1,14 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/benchmark"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/benchmark"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); const Context = Server.Context; const Route = Server.Route; const Router = Server.Router; @@ -51,22 +56,44 @@ fn hi_handler(ctx: *Context) void { pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; + const max_conn = 1024; + var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){}; const allocator = gpa.allocator(); defer _ = gpa.deinit(); + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .auto, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + var router = Router.init(allocator); defer router.deinit(); try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); try router.serve_route("/hi/%s", Route.init().get(hi_handler)); - var server = Server.init(.{ - .router = &router, - .allocator = allocator, - .threading = .auto, - }); - defer server.deinit(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ + .allocator = alloc, + .size_connections_max = max_conn, + }); - try server.bind(host, port); - try server.listen(); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/sse/index.html b/examples/http/sse/index.html new file mode 100644 index 0000000..218bccc --- /dev/null +++ b/examples/http/sse/index.html @@ -0,0 +1,85 @@ + + + + + + SSE Example + + +

+    Server-Sent Events Example

+ + + + + + diff --git a/examples/http/sse/main.zig b/examples/http/sse/main.zig new file mode 100644 index 0000000..3518b75 --- /dev/null +++ b/examples/http/sse/main.zig @@ -0,0 +1,141 @@ +const std = @import("std"); +const log = std.log.scoped(.@"examples/sse"); + +const zzz = @import("zzz"); +const http = zzz.HTTP; + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; +const Task = tardy.Task; +const Broadcast = tardy.Broadcast; +const Channel = tardy.Channel; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; +const SSE = Server.SSE; + +const SSEBroadcastContext = struct { + sse: *SSE, + channel: *Channel(usize), +}; + +fn sse_send(_: *Runtime, value_opt: ?*const usize, ctx: *SSEBroadcastContext) !void { + if (value_opt) |value| { + const data = try std.fmt.allocPrint( + ctx.sse.context.allocator, + "value: {d}", + .{value.*}, + ); + + try ctx.sse.send(.{ .data = data }, ctx, sse_recv); + } else { + const broadcast = ctx.sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + broadcast.unsubscribe(ctx.channel); + try ctx.sse.context.close(); + } +} + +fn sse_recv(_: *Runtime, success: bool, ctx: *SSEBroadcastContext) !void { + if (success) { + try ctx.channel.recv(ctx, sse_send); + } else { + log.debug("channel closed", .{}); + const broadcast = ctx.sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + broadcast.unsubscribe(ctx.channel); + } +} + +fn sse_init(rt: *Runtime, success: bool, sse: *SSE) !void { + if (!success) { + // on failure, it'll auto close after + // the sse task. + log.err("sse initalization failed", .{}); + return; + } + + const broadcast = sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + const context = try sse.context.allocator.create(SSEBroadcastContext); + context.* = .{ .sse = sse, .channel = try broadcast.subscribe(rt, 10) }; + try context.channel.recv(context, sse_send); +} + +fn sse_handler(ctx: *Context) void { + log.debug("going into sse mode", .{}); + ctx.to_sse(sse_init) catch unreachable; +} + +fn msg_handler(ctx: *Context) void { + log.debug("message handler", .{}); + const broadcast = ctx.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + broadcast.send(0) catch unreachable; + ctx.respond(.{ + .status = .OK, + .mime = http.Mime.HTML, + .body = "", + }) catch unreachable; +} + +fn kill_handler(ctx: *Context) void { + ctx.runtime.stop(); +} + +pub fn main() !void { + const host: []const u8 = "0.0.0.0"; + const port: u16 = 9862; + const max_conn = 512; + + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const allocator = gpa.allocator(); + defer _ = gpa.deinit(); + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .{ .multi = 2 }, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + + var router = Router.init(allocator); + defer router.deinit(); + + try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); + try router.serve_route("/kill", Route.init().get(kill_handler)); + try router.serve_route("/stream", Route.init().get(sse_handler)); + try router.serve_route("/message", Route.init().post(msg_handler)); + + var broadcast = try Broadcast(usize).init(allocator, max_conn); + defer broadcast.deinit(); + + const EntryParams = struct { + router: *const Router, + broadcast: *Broadcast(usize), + }; + + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: 
std.mem.Allocator, params: EntryParams) !void { + try rt.storage.store_ptr("broadcast", params.broadcast); + + var server = Server.init(.{ + .allocator = alloc, + .size_connections_max = max_conn, + }); + + try server.bind(host, port); + try server.serve(params.router, rt); + } + }.entry, + EntryParams{ .router = &router, .broadcast = &broadcast }, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); +} diff --git a/src/core/job.zig b/src/core/job.zig index 01ae302..378b7cb 100644 --- a/src/core/job.zig +++ b/src/core/job.zig @@ -1,7 +1,17 @@ const std = @import("std"); const Pseudoslice = @import("lib.zig").Pseudoslice; -pub const SendType = struct { +const TaskFn = @import("tardy").TaskFn; + +pub const AfterType = union(enum) { + recv, + sse: struct { + func: *const anyopaque, + ctx: *anyopaque, + }, +}; +pub const SendInner = struct { + after: AfterType, slice: Pseudoslice, count: usize, security: union(enum) { @@ -24,6 +34,6 @@ pub const Job = union(enum) { accept, handshake: struct { state: enum { recv, send }, count: usize }, recv: struct { count: usize }, - send: SendType, + send: SendInner, close, }; diff --git a/src/core/pseudoslice.zig b/src/core/pseudoslice.zig index e3a9660..c4d519b 100644 --- a/src/core/pseudoslice.zig +++ b/src/core/pseudoslice.zig @@ -21,7 +21,7 @@ pub const Pseudoslice = struct { /// Operates like a slice. That means it does not capture the end. /// Start is an inclusive bound and end is an exclusive bound. - pub fn get(self: *Pseudoslice, start: usize, end: usize) []const u8 { + pub fn get(self: *const Pseudoslice, start: usize, end: usize) []const u8 { assert(end >= start); assert(self.shared.len >= end - start); const clamped_end = @min(end, self.len); diff --git a/src/http/context.zig b/src/http/context.zig index f10470b..bd2ea3a 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -2,6 +2,8 @@ const std = @import("std"); const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/context"); +const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice; + const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; const Provision = @import("provision.zig").Provision; @@ -9,17 +11,20 @@ const Provision = @import("provision.zig").Provision; const Request = @import("request.zig").Request; const Response = @import("response.zig").Response; const ResponseSetOptions = Response.ResponseSetOptions; +const Mime = @import("mime.zig").Mime; +const _SSE = @import("sse.zig").SSE; const Runtime = @import("tardy").Runtime; const Task = @import("tardy").Task; +const TaskFn = @import("tardy").TaskFn; const raw_respond = @import("server.zig").raw_respond; // Context is dependent on the server that gets created. -// This is because the trigger_task ends up being dependent. pub fn Context(comptime Server: type) type { return struct { const Self = @This(); + const SSE = _SSE(Server); allocator: std.mem.Allocator, runtime: *Runtime, /// The Request that triggered this handler. @@ -32,38 +37,82 @@ pub fn Context(comptime Server: type) type { provision: *Provision, triggered: bool = false, - pub fn respond(self: *Self, options: ResponseSetOptions) void { + pub fn to_sse(self: *Self, then: TaskFn(bool, *SSE)) !void { assert(!self.triggered); self.triggered = true; - self.response.set(options); - - // this will write the data into the appropriate places. 
- const status = raw_respond(self.provision) catch unreachable; - self.provision.job = .{ - .send = .{ - .count = 0, - .slice = status.send, - .security = undefined, + self.response.set(.{ + .status = .OK, + .body = "", + .mime = Mime{ + .extension = ".sse", + .description = "Server-Sent Events", + .content_type = "text/event-stream", }, - }; + }); - const body = options.body orelse ""; + const headers = try self.provision.response.headers_into_buffer( + self.provision.buffer, + null, + ); + + const sse = try self.allocator.create(SSE); + sse.* = .{ .context = self }; + + const pslice = Pseudoslice.init(headers, "", self.provision.buffer); - const first_chunk = Server.prepare_send( + const first_chunk = try Server.prepare_send( self.runtime, self.provision, - body, + .{ .sse = .{ + .func = then, + .ctx = sse, + } }, + pslice, + ); + + try self.runtime.net.send( + self.provision, + Server.send_then_sse_task, + self.provision.socket, + first_chunk, + ); + } + + pub fn close(self: *Self) !void { + self.provision.job = .close; + try self.runtime.net.close( + self.provision, + Server.close_task, + self.provision.socket, + ); + } + + pub fn respond(self: *Self, options: ResponseSetOptions) !void { + assert(!self.triggered); + self.triggered = true; + self.response.set(options); + + const body = options.body orelse ""; + const headers = try self.provision.response.headers_into_buffer( + self.provision.buffer, @intCast(body.len), - ) catch unreachable; + ); + const pslice = Pseudoslice.init(headers, body, self.provision.buffer); + + const first_chunk = try Server.prepare_send( + self.runtime, + self.provision, + .recv, + pslice, + ); - self.runtime.net.send( - *Provision, - Server.send_task, + try self.runtime.net.send( self.provision, + Server.send_then_recv_task, self.provision.socket, first_chunk, - ) catch unreachable; + ); } }; } diff --git a/src/http/router.zig b/src/http/router.zig index a4c6434..d6efb1a 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -242,7 +242,7 @@ pub fn Router(comptime Server: type) type { .status = .@"Not Modified", .mime = Mime.HTML, .body = "", - }); + }) catch unreachable; return; } @@ -253,7 +253,7 @@ pub fn Router(comptime Server: type) type { .status = .OK, .mime = mime, .body = bytes, - }); + }) catch unreachable; } }.handler_fn); diff --git a/src/http/server.zig b/src/http/server.zig index 99cee47..0f9c24b 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -15,6 +15,7 @@ const Response = @import("response.zig").Response; const Capture = @import("routing_trie.zig").Capture; const QueryMap = @import("routing_trie.zig").QueryMap; const ResponseSetOptions = Response.ResponseSetOptions; +const _SSE = @import("sse.zig").SSE; const Provision = @import("provision.zig").Provision; const Mime = @import("mime.zig").Mime; @@ -22,8 +23,9 @@ const _Router = @import("router.zig").Router; const _Route = @import("route.zig").Route; const HTTPError = @import("lib.zig").HTTPError; +const AfterType = @import("../core/job.zig").AfterType; + const Pool = @import("tardy").Pool; -pub const Threading = @import("tardy").TardyThreading; pub const Runtime = @import("tardy").Runtime; pub const Task = @import("tardy").Task; const TaskFn = @import("tardy").TaskFn; @@ -69,24 +71,91 @@ pub inline fn raw_respond(p: *Provision) !RecvStatus { return .{ .send = pseudo }; } -pub fn Server( - comptime security: Security, - comptime async_type: AsyncIOType, -) type { +/// These are various general configuration +/// options that are important for the actual framework. 
+/// +/// This includes various different options and limits +/// for interacting with the underlying network. +pub const ServerConfig = struct { + /// The allocator that server will use. + allocator: std.mem.Allocator, + /// Kernel Backlog Value. + size_backlog: u31 = 512, + /// Number of Maximum Concurrent Connections. + /// + /// This is applied PER thread if using multi-threading. + /// zzz will drop/close any connections greater + /// than this. + /// + /// You want to tune this to your expected number + /// of maximum connections. + /// + /// Default: 1024 + size_connections_max: u16 = 1024, + /// Maximum number of completions we can reap + /// with a single call of reap(). + /// + /// Default: 256 + size_completions_reap_max: u16 = 256, + /// Amount of allocated memory retained + /// after an arena is cleared. + /// + /// A higher value will increase memory usage but + /// should make allocators faster.Tardy + /// + /// A lower value will reduce memory usage but + /// will make allocators slower. + /// + /// Default: 1KB + size_connection_arena_retain: u32 = 1024, + /// Size of the buffer (in bytes) used for + /// interacting with the socket. + /// + /// Default: 4 KB. + size_socket_buffer: u32 = 1024 * 4, + /// Maximum size (in bytes) of the Recv buffer. + /// This is mainly a concern when you are reading in + /// large requests before responding. + /// + /// Default: 2MB. + size_recv_buffer_max: u32 = 1024 * 1024 * 2, + /// Maximum number of Headers in a Request/Response + /// + /// Default: 32 + num_header_max: u32 = 32, + /// Maximum number of Captures in a Route + /// + /// Default: 8 + num_captures_max: u32 = 8, + /// Maximum number of Queries in a URL + /// + /// Default: 8 + num_queries_max: u32 = 8, + /// Maximum size (in bytes) of the Request. + /// + /// Default: 2MB. + size_request_max: u32 = 1024 * 1024 * 2, + /// Maximum size (in bytes) of the Request URI. + /// + /// Default: 2KB. + size_request_uri_max: u32 = 1024 * 2, +}; + +pub fn Server(comptime security: Security) type { const TLSContextType = comptime if (security == .tls) TLSContext else void; const TLSType = comptime if (security == .tls) ?TLS else void; - const Tardy = TardyCreator(async_type); return struct { const Self = @This(); pub const Context = _Context(Self); pub const Router = _Router(Self); pub const Route = _Route(Self); + pub const SSE = _SSE(Self); allocator: std.mem.Allocator, - tardy: Tardy, config: ServerConfig, addr: std.net.Address, tls_ctx: TLSContextType, + router: *const Router, fn route_and_respond(runtime: *Runtime, p: *Provision, router: *const Router) !RecvStatus { route: { @@ -159,82 +228,6 @@ pub fn Server( return try raw_respond(p); } - /// These are various general configuration - /// options that are important for the actual framework. - /// - /// This includes various different options and limits - /// for interacting with the underlying network. - pub const ServerConfig = struct { - /// The allocator that server will use. - allocator: std.mem.Allocator, - /// HTTP Request Router. - router: *Router, - /// Threading Model to use. - /// - /// Default: .auto - threading: Threading = .auto, - /// Kernel Backlog Value. - size_backlog: u31 = 512, - /// Number of Maximum Concurrent Connections. - /// - /// This is applied PER thread if using multi-threading. - /// zzz will drop/close any connections greater - /// than this. - /// - /// You want to tune this to your expected number - /// of maximum connections. 
- /// - /// Default: 1024 - size_connections_max: u16 = 1024, - /// Maximum number of completions we can reap - /// with a single call of reap(). - /// - /// Default: 256 - size_completions_reap_max: u16 = 256, - /// Amount of allocated memory retained - /// after an arena is cleared. - /// - /// A higher value will increase memory usage but - /// should make allocators faster.Tardy - /// - /// A lower value will reduce memory usage but - /// will make allocators slower. - /// - /// Default: 1KB - size_connection_arena_retain: u32 = 1024, - /// Size of the buffer (in bytes) used for - /// interacting with the socket. - /// - /// Default: 4 KB. - size_socket_buffer: u32 = 1024 * 4, - /// Maximum size (in bytes) of the Recv buffer. - /// This is mainly a concern when you are reading in - /// large requests before responding. - /// - /// Default: 2MB. - size_recv_buffer_max: u32 = 1024 * 1024 * 2, - /// Maximum number of Headers in a Request/Response - /// - /// Default: 32 - num_header_max: u32 = 32, - /// Maximum number of Captures in a Route - /// - /// Default: 8 - num_captures_max: u32 = 8, - /// Maximum number of Queries in a URL - /// - /// Default: 8 - num_queries_max: u32 = 8, - /// Maximum size (in bytes) of the Request. - /// - /// Default: 2MB. - size_request_max: u32 = 1024 * 1024 * 2, - /// Maximum size (in bytes) of the Request URI. - /// - /// Default: 2KB. - size_request_uri_max: u32 = 1024 * 2, - }; - pub fn init(config: ServerConfig) Self { const tls_ctx = switch (comptime security) { .tls => |inner| TLSContext.init(.{ @@ -250,16 +243,10 @@ pub fn Server( return Self{ .allocator = config.allocator, - .tardy = Tardy.init(.{ - .allocator = config.allocator, - .threading = config.threading, - .size_tasks_max = config.size_connections_max, - .size_aio_jobs_max = config.size_connections_max, - .size_aio_reap_max = config.size_completions_reap_max, - }) catch unreachable, .config = config, .addr = undefined, .tls_ctx = tls_ctx, + .router = undefined, }; } @@ -267,8 +254,6 @@ pub fn Server( if (comptime security == .tls) { self.tls_ctx.deinit(); } - - self.tardy.deinit(); } fn create_socket(self: *const Self) !std.posix.socket_t { @@ -306,16 +291,9 @@ pub fn Server( ); } - try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); return socket; } - /// If you are using a custom implementation that does NOT rely - /// on TCP/IP, you can SKIP calling this method and just set the - /// socket value yourself. - /// - /// This is only allowed on certain targets that do not have TCP/IP - /// support. 
pub fn bind(self: *Self, host: []const u8, port: u16) !void { assert(host.len > 0); assert(port > 0); @@ -328,7 +306,7 @@ pub fn Server( }; } - fn close_task(rt: *Runtime, _: *const Task, provision: *Provision) !void { + pub fn close_task(rt: *Runtime, _: void, provision: *Provision) !void { assert(provision.job == .close); const server_socket = rt.storage.get("server_socket", std.posix.socket_t); const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); @@ -363,29 +341,21 @@ pub fn Server( if (!accept_queued.*) { accept_queued.* = true; try rt.net.accept( - std.posix.socket_t, - accept_task, server_socket, + accept_task, server_socket, ); } } - fn accept_task(rt: *Runtime, t: *const Task, server_socket: std.posix.socket_t) !void { - const child_socket = t.result.?.socket; - + fn accept_task(rt: *Runtime, child_socket: std.posix.socket_t, socket: std.posix.socket_t) !void { const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); const accept_queued = rt.storage.get_ptr("accept_queued", bool); accept_queued.* = false; if (rt.scheduler.tasks.clean() >= 2) { accept_queued.* = true; - try rt.net.accept( - std.posix.socket_t, - accept_task, - server_socket, - server_socket, - ); + try rt.net.accept(socket, accept_task, socket); } if (!Cross.socket.is_valid(child_socket)) { @@ -395,7 +365,7 @@ pub fn Server( // This should never fail. It means that we have a dangling item. assert(pool.clean() > 0); - const borrowed = pool.borrow_hint(t.index) catch unreachable; + const borrowed = pool.borrow() catch unreachable; log.info("{d} - accepting connection", .{borrowed.index}); log.debug( @@ -412,6 +382,7 @@ pub fn Server( // Store the index of this item. provision.index = @intCast(borrowed.index); provision.socket = child_socket; + log.debug("provision buffer size: {d}", .{provision.buffer.len}); switch (comptime security) { .tls => |_| { @@ -424,51 +395,38 @@ pub fn Server( tls_ptr.* = tls_ctx.create(child_socket) catch |e| { log.err("{d} - tls creation failed={any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSCreationFailed; }; const recv_buf = tls_ptr.*.?.start_handshake() catch |e| { log.err("{d} - tls start handshake failed={any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSStartHandshakeFailed; }; provision.job = .{ .handshake = .{ .state = .recv, .count = 0 } }; - try rt.net.recv( - *Provision, - handshake_task, - borrowed.item, - child_socket, - recv_buf, - ); + try rt.net.recv(borrowed.item, handshake_task, child_socket, recv_buf); }, .plain => { provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv( - *Provision, - recv_task, - provision, - child_socket, - provision.buffer, - ); + try rt.net.recv(provision, recv_task, child_socket, provision.buffer); }, } } - fn recv_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { + fn recv_task(rt: *Runtime, length: i32, provision: *Provision) !void { assert(provision.job == .recv); - const length: i32 = t.result.?.value; - const config = rt.storage.get_const_ptr("config", ServerConfig); + const router = rt.storage.get_const_ptr("router", Router); const recv_job = &provision.job.recv; // If the socket is closed. 
if (length <= 0) { provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return; } @@ -488,7 +446,7 @@ pub fn Server( break :blk tls_ptr.*.?.decrypt(pre_recv_buffer) catch |e| { log.err("{d} - decrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSDecryptFailed; }; }, @@ -596,7 +554,7 @@ pub fn Server( } if (!provision.request.expect_body()) { - break :status route_and_respond(rt, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, router) catch unreachable; } // Everything after here is a Request that is expecting a body. @@ -623,7 +581,7 @@ pub fn Server( log.debug("{d} - got whole body with header", .{provision.index}); const body_end = header_end + difference; provision.request.set_body(provision.recv_buffer.items[header_end..body_end]); - break :status route_and_respond(rt, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, router) catch unreachable; } else { // Partial Body log.debug("{d} - got partial body with header", .{provision.index}); @@ -636,7 +594,7 @@ pub fn Server( log.debug("{d} - got body of length 0", .{provision.index}); // Body of Length 0. provision.request.set_body(""); - break :status route_and_respond(rt, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, router) catch unreachable; } else { // Got only header. log.debug("{d} - got all header aka no body", .{provision.index}); @@ -687,7 +645,7 @@ pub fn Server( if (job.count >= request_length) { provision.request.set_body(provision.recv_buffer.items[header_end..request_length]); - break :status route_and_respond(rt, provision, config.router) catch unreachable; + break :status route_and_respond(rt, provision, router) catch unreachable; } else { break :status .recv; } @@ -703,9 +661,8 @@ pub fn Server( }, .recv => { try rt.net.recv( - *Provision, - recv_task, provision, + recv_task, provision.socket, provision.buffer, ); @@ -722,12 +679,13 @@ pub fn Server( const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSEncryptFailed; }; provision.job = .{ .send = .{ + .after = .recv, .slice = pslice.*, .count = @intCast(plain_buffer.len), .security = .{ @@ -740,16 +698,16 @@ pub fn Server( }; try rt.net.send( - *Provision, - send_task, provision, + send_then_recv_task, provision.socket, - provision.buffer, + encrypted_buffer, ); }, .plain => { provision.job = .{ .send = .{ + .after = .recv, .slice = pslice.*, .count = 0, .security = .plain, @@ -757,11 +715,10 @@ pub fn Server( }; try rt.net.send( - *Provision, - send_task, provision, + send_then_recv_task, provision.socket, - provision.buffer, + plain_buffer, ); }, } @@ -769,10 +726,8 @@ pub fn Server( } } - fn handshake_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { + fn handshake_task(rt: *Runtime, length: i32, provision: *Provision) !void { assert(security == .tls); - const length: i32 = t.result.?.value; - if (comptime security == .tls) { const tls_slice = rt.storage.get("tls_slice", []TLSType); @@ -787,14 
+742,14 @@ pub fn Server( if (length <= 0) { log.debug("handshake connection closed", .{}); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSHandshakeClosed; } if (handshake_job.count >= 50) { log.debug("handshake taken too many cycles", .{}); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSHandshakeTooManyCycles; } @@ -814,45 +769,25 @@ pub fn Server( .recv => |buf| { log.debug("queueing recv in handshake", .{}); handshake_job.state = .recv; - try rt.net.recv( - *Provision, - handshake_task, - provision, - provision.socket, - buf, - ); + try rt.net.recv(provision, handshake_task, provision.socket, buf); }, .send => |buf| { log.debug("queueing send in handshake", .{}); handshake_job.state = .send; - try rt.net.send( - *Provision, - handshake_task, - provision, - provision.socket, - buf, - ); + try rt.net.send(provision, handshake_task, provision.socket, buf); }, .complete => { log.debug("handshake complete", .{}); provision.job = .{ .recv = .{ .count = 0 } }; - try rt.net.recv( - *Provision, - recv_task, - provision, - provision.socket, - provision.buffer, - ); + try rt.net.recv(provision, recv_task, provision.socket, provision.buffer); }, } } } /// Prepares the provision send_job and returns the first send chunk - pub fn prepare_send(rt: *Runtime, provision: *Provision, body: []const u8, content_length: ?u32) ![]const u8 { + pub fn prepare_send(rt: *Runtime, provision: *Provision, after: AfterType, pslice: Pseudoslice) ![]const u8 { const config = rt.storage.get_const_ptr("config", ServerConfig); - const headers = try provision.response.headers_into_buffer(provision.buffer, content_length); - var pslice = Pseudoslice.init(headers, body, provision.buffer); const plain_buffer = pslice.get(0, config.size_socket_buffer); switch (comptime security) { @@ -864,12 +799,13 @@ pub fn Server( const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSEncryptFailed; }; provision.job = .{ .send = .{ + .after = after, .slice = pslice, .count = @intCast(plain_buffer.len), .security = .{ @@ -886,6 +822,7 @@ pub fn Server( .plain => { provision.job = .{ .send = .{ + .after = after, .slice = pslice, .count = 0, .security = .plain, @@ -897,218 +834,222 @@ pub fn Server( } } - pub fn send_task(rt: *Runtime, t: *const Task, provision: *Provision) !void { - assert(provision.job == .send); - const length: i32 = t.result.?.value; - - const config = rt.storage.get_const_ptr("config", ServerConfig); + pub const send_then_sse_task = send_then(struct { + fn inner(rt: *Runtime, success: bool, provision: *Provision) !void { + const send_job = provision.job.send; + assert(send_job.after == .sse); + const func: TaskFn(bool, *anyopaque) = @ptrCast(@alignCast(send_job.after.sse.func)); + const ctx: *anyopaque = @ptrCast(@alignCast(send_job.after.sse.ctx)); + try @call(.auto, func, .{ rt, success, ctx }); - // If the socket is closed. 
- if (length <= 0) { - provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); - return; + if (!success) { + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + } } + }.inner); - const send_job = &provision.job.send; + pub const send_then_recv_task = send_then(struct { + fn inner(rt: *Runtime, success: bool, provision: *Provision) !void { + if (!success) { + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return; + } - log.debug("{d} - send triggered", .{provision.index}); - const send_count: usize = @intCast(length); - log.debug("{d} - send length: {d}", .{ provision.index, send_count }); + const config = rt.storage.get_const_ptr("config", ServerConfig); - switch (comptime security) { - .tls => { - assert(send_job.security == .tls); + log.debug("{d} - queueing a new recv", .{provision.index}); + _ = provision.arena.reset(.{ + .retain_with_limit = config.size_connection_arena_retain, + }); + provision.recv_buffer.clearRetainingCapacity(); + provision.job = .{ .recv = .{ .count = 0 } }; - const tls_slice = rt.storage.get("tls_slice", []TLSType); + try rt.net.recv( + provision, + recv_task, + provision.socket, + provision.buffer, + ); + } + }.inner); + + fn send_then(comptime func: TaskFn(bool, *Provision)) TaskFn(i32, *Provision) { + return struct { + fn send_then_inner(rt: *Runtime, length: i32, provision: *Provision) !void { + assert(provision.job == .send); + const config = rt.storage.get_const_ptr("config", ServerConfig); + + // If the socket is closed. + if (length <= 0) { + try @call(.always_inline, func, .{ rt, false, provision }); + return; + } - const job_tls = &send_job.security.tls; - job_tls.encrypted_count += send_count; + const send_job = &provision.job.send; - if (job_tls.encrypted_count >= job_tls.encrypted.len) { - if (send_job.count >= send_job.slice.len) { - // All done sending. - log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; + log.debug("{d} - send triggered", .{provision.index}); + const send_count: usize = @intCast(length); + log.debug("{d} - sent length: {d}", .{ provision.index, send_count }); - try rt.net.recv( - *Provision, - recv_task, - provision, - provision.socket, - provision.buffer, - ); - } else { - // Queue a new chunk up for sending. - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); + switch (comptime security) { + .tls => { + assert(send_job.security == .tls); - const inner_slice = send_job.slice.get( - send_job.count, - send_job.count + config.size_socket_buffer, - ); + const tls_slice = rt.storage.get("tls_slice", []TLSType); - send_job.count += @intCast(inner_slice.len); + const job_tls = &send_job.security.tls; + job_tls.encrypted_count += send_count; + + if (job_tls.encrypted_count >= job_tls.encrypted.len) { + if (send_job.count >= send_job.slice.len) { + try @call(.always_inline, func, .{ rt, true, provision }); + } else { + // Queue a new chunk up for sending. 
+ log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); + + const inner_slice = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); + + send_job.count += @intCast(inner_slice.len); + + const tls_ptr: *TLSType = &tls_slice[provision.index]; + assert(tls_ptr.* != null); + + const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { + log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); + provision.job = .close; + try rt.net.close(provision, close_task, provision.socket); + return error.TLSEncryptFailed; + }; + + job_tls.encrypted = encrypted; + job_tls.encrypted_count = 0; + + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + job_tls.encrypted, + ); + } + } else { + log.debug( + "{d} - sending next encrypted chunk starting at index {d}", + .{ provision.index, job_tls.encrypted_count }, + ); + + const remainder = job_tls.encrypted[job_tls.encrypted_count..]; + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + remainder, + ); + } + }, + .plain => { + assert(send_job.security == .plain); + send_job.count += send_count; - const tls_ptr: *TLSType = &tls_slice[provision.index]; - assert(tls_ptr.* != null); + if (send_job.count >= send_job.slice.len) { + try @call(.always_inline, func, .{ rt, true, provision }); + } else { + log.debug( + "{d} - sending next chunk starting at index {d}", + .{ provision.index, send_job.count }, + ); - const encrypted = tls_ptr.*.?.encrypt(inner_slice) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); - return error.TLSEncryptFailed; - }; + const plain_buffer = send_job.slice.get( + send_job.count, + send_job.count + config.size_socket_buffer, + ); - job_tls.encrypted = encrypted; - job_tls.encrypted_count = 0; + log.debug("socket buffer size: {d}", .{config.size_socket_buffer}); - try rt.net.send( - *Provision, - send_task, - provision, - provision.socket, - job_tls.encrypted, - ); - } - } else { - log.debug( - "{d} - sending next encrypted chunk starting at index {d}", - .{ provision.index, job_tls.encrypted_count }, - ); - - const remainder = job_tls.encrypted[job_tls.encrypted_count..]; - try rt.net.send( - *Provision, - send_task, - provision, - provision.socket, - remainder, - ); + log.debug("{d} - chunk ends at: {d}", .{ + provision.index, + plain_buffer.len + send_job.count, + }); + + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + plain_buffer, + ); + } + }, } - }, - .plain => { - assert(send_job.security == .plain); - send_job.count += send_count; + } + }.send_then_inner; + } - if (send_job.count >= send_job.slice.len) { - log.debug("{d} - queueing a new recv", .{provision.index}); - _ = provision.arena.reset(.{ - .retain_with_limit = config.size_connection_arena_retain, - }); - provision.recv_buffer.clearRetainingCapacity(); - provision.job = .{ .recv = .{ .count = 0 } }; + pub inline fn serve(self: *Self, router: *const Router, rt: *Runtime) !void { + self.router = router; - try rt.net.recv( - *Provision, - recv_task, - provision, - provision.socket, - provision.buffer, - ); - } else { - log.debug( - "{d} - sending next chunk starting at index {d}", - .{ provision.index, send_job.count }, - ); + log.info("server listening...", .{}); + log.info("security mode: {s}", .{@tagName(security)}); - const plain_buffer = send_job.slice.get( - send_job.count, - 
send_job.count + config.size_socket_buffer, - ); + const socket = try self.create_socket(); + try std.posix.bind(socket, &self.addr.any, self.addr.getOsSockLen()); + try std.posix.listen(socket, self.config.size_backlog); + + const provision_pool = try rt.allocator.create(Pool(Provision)); + provision_pool.* = try Pool(Provision).init( + rt.allocator, + self.config.size_connections_max, + Provision.init_hook, + self.config, + ); - log.debug("{d} - chunk ends at: {d}", .{ - provision.index, - plain_buffer.len + send_job.count, - }); + try rt.storage.store_ptr("router", @constCast(router)); + try rt.storage.store_ptr("provision_pool", provision_pool); + try rt.storage.store_alloc("config", self.config); - try rt.net.send( - *Provision, - recv_task, - provision, - provision.socket, - plain_buffer, - ); + if (comptime security == .tls) { + const tls_slice = try rt.allocator( + TLSType, + self.config.size_connections_max, + ); + if (comptime security == .tls) { + for (tls_slice) |*tls| { + tls.* = null; } - }, + } + + // since slices are fat pointers... + try rt.storage.store_alloc("tls_slice", tls_slice); + try rt.storage.store_ptr("tls_ctx", &self.tls_ctx); } - } - pub fn listen(self: *Self) !void { - log.info("server listening...", .{}); - log.info("security mode: {s}", .{@tagName(security)}); + try rt.storage.store_alloc("server_socket", socket); + try rt.storage.store_alloc("accept_queued", true); - try self.tardy.entry( - struct { - fn rt_start(rt: *Runtime, alloc: std.mem.Allocator, zzz: *Self) !void { - const socket = try zzz.create_socket(); - try std.posix.listen(socket, zzz.config.size_backlog); - - const provision_pool = try alloc.create(Pool(Provision)); - provision_pool.* = try Pool(Provision).init( - alloc, - zzz.config.size_connections_max, - Provision.init_hook, - zzz.config, - ); - - try rt.storage.store_ptr("provision_pool", provision_pool); - try rt.storage.store_ptr("config", &zzz.config); - - if (comptime security == .tls) { - const tls_slice = try alloc.alloc( - TLSType, - zzz.config.size_connections_max, - ); - if (comptime security == .tls) { - for (tls_slice) |*tls| { - tls.* = null; - } - } + try rt.net.accept(socket, accept_task, socket); + } - // since slices are fat pointers... - try rt.storage.store_alloc("tls_slice", tls_slice); - try rt.storage.store_ptr("tls_ctx", &zzz.tls_ctx); - } + pub inline fn clean(rt: *Runtime) !void { + // clean up socket. + const server_socket = rt.storage.get("server_socket", std.posix.socket_t); + std.posix.close(server_socket); - try rt.storage.store_alloc("server_socket", socket); - try rt.storage.store_alloc("accept_queued", true); + // clean up provision pool. + const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); + provision_pool.deinit(Provision.deinit_hook, rt.allocator); + rt.allocator.destroy(provision_pool); - try rt.net.accept( - std.posix.socket_t, - accept_task, - socket, - socket, - ); - } - }.rt_start, - self, - struct { - fn rt_end(rt: *Runtime, alloc: std.mem.Allocator, _: anytype) void { - // clean up socket. - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - std.posix.close(server_socket); - - // clean up provision pool. - const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - provision_pool.deinit(Provision.deinit_hook, alloc); - alloc.destroy(provision_pool); - - // clean up TLS. 
- if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); - alloc.free(tls_slice); - } - } - }.rt_end, - void, - ); + // clean up TLS. + if (comptime security == .tls) { + const tls_slice = rt.storage.get("tls_slice", []TLSType); + rt.allocator.free(tls_slice); + } } }; } diff --git a/src/http/sse.zig b/src/http/sse.zig new file mode 100644 index 0000000..216706c --- /dev/null +++ b/src/http/sse.zig @@ -0,0 +1,92 @@ +const std = @import("std"); + +const Pseudoslice = @import("../core/pseudoslice.zig").Pseudoslice; + +const Provision = @import("provision.zig").Provision; +const _Context = @import("context.zig").Context; + +const TaskFn = @import("tardy").TaskFn; +const Runtime = @import("tardy").Runtime; + +const SSEMessage = struct { + id: ?[]const u8 = null, + event: ?[]const u8 = null, + data: ?[]const u8 = null, + retry: ?u64 = null, +}; + +pub fn SSE(comptime Server: type) type { + const Context = _Context(Server); + return struct { + const Self = @This(); + context: *Context, + + pub fn send( + self: *Self, + options: SSEMessage, + then_context: anytype, + then: TaskFn(bool, @TypeOf(then_context)), + ) !void { + var index: usize = 0; + const buffer = self.context.provision.buffer; + + if (options.id) |id| { + const buf = try std.fmt.bufPrint( + buffer[index..], + "id: {s}\n", + .{id}, + ); + index += buf.len; + } + + if (options.event) |event| { + const buf = try std.fmt.bufPrint( + buffer[index..], + "event: {s}\n", + .{event}, + ); + index += buf.len; + } + + if (options.data) |data| { + const buf = try std.fmt.bufPrint( + buffer[index..], + "data: {s}\n", + .{data}, + ); + index += buf.len; + } + + if (options.retry) |retry| { + const buf = try std.fmt.bufPrint( + buffer[index..], + "retry: {d}\n", + .{retry}, + ); + index += buf.len; + } + + buffer[index] = '\n'; + index += 1; + + const pslice = Pseudoslice.init(buffer[0..index], "", buffer); + + const first_chunk = Server.prepare_send( + self.context.runtime, + self.context.provision, + .{ .sse = .{ + .func = then, + .ctx = then_context, + } }, + pslice, + ) catch unreachable; + + self.context.runtime.net.send( + self.context.provision, + Server.send_then_sse_task, + self.context.provision.socket, + first_chunk, + ) catch unreachable; + } + }; +} From 39dd6459322160f3b4f9cd52be0500da34e77adf Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sat, 2 Nov 2024 01:58:13 -0700 Subject: [PATCH 09/17] fix(examples): ensure they compile --- examples/http/basic/main.zig | 9 +++- examples/http/benchmark/main.zig | 4 +- examples/http/fs/main.zig | 51 +++++++++++++--------- examples/http/minram/main.zig | 69 ++++++++++++++++++++---------- examples/http/multithread/main.zig | 49 ++++++++++++++------- examples/http/sse/main.zig | 15 ++++--- examples/http/tls/main.zig | 52 +++++++++++++--------- examples/http/valgrind/main.zig | 41 ++++++++++++------ src/http/context.zig | 6 ++- src/http/router.zig | 54 ++++++++++------------- src/http/server.zig | 62 +++++++++++++-------------- src/http/sse.zig | 2 + 12 files changed, 249 insertions(+), 165 deletions(-) diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index a9fc435..d8eb98f 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -22,6 +22,8 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); + // Creating our Tardy instance that + // will spawn our runtimes. 
var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, @@ -45,14 +47,19 @@ pub fn main() !void { \\ ; + // This is the standard response and what you + // will usually be using. This will send to the + // client and then continue to await more requests. ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }); + }) catch unreachable; } }.handler_fn)); + // This provides the entry function into the Tardy runtime. This will run + // exactly once inside of each runtime (each thread gets a single runtime). try t.entry( struct { fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index a66ef47..f789861 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -42,7 +42,7 @@ fn hi_handler(ctx: *Context) void { .status = .@"Internal Server Error", .mime = http.Mime.HTML, .body = "Out of Memory!", - }); + }) catch unreachable; return; }; @@ -50,7 +50,7 @@ fn hi_handler(ctx: *Context) void { .status = .OK, .mime = http.Mime.HTML, .body = body, - }); + }) catch unreachable; } pub fn main() !void { diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig index 63b332b..72667f4 100644 --- a/examples/http/fs/main.zig +++ b/examples/http/fs/main.zig @@ -1,12 +1,17 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/fs"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/fs"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; -const Router = Server.Router; pub fn main() !void { const host: []const u8 = "0.0.0.0"; @@ -18,6 +23,12 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .auto, + }); + defer t.deinit(); + var router = Router.init(allocator); defer router.deinit(); @@ -36,32 +47,32 @@ pub fn main() !void { .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }); + }) catch unreachable; } }.handler_fn)); try router.serve_route("/kill", Route.init().get(struct { pub fn handler_fn(ctx: *Context) void { ctx.runtime.stop(); - - ctx.respond(.{ - .status = .OK, - .mime = http.Mime.HTML, - .body = "", - }); } }.handler_fn)); try router.serve_fs_dir("/static", "./examples/http/fs/static"); - var server = Server.init(.{ - .router = &router, - .allocator = allocator, - .threading = .auto, - .size_connections_max = 256, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ .allocator = alloc }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig index 6b56221..b515785 100644 --- a/examples/http/minram/main.zig +++ b/examples/http/minram/main.zig @@ -1,12 +1,17 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/minram"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = 
std.log.scoped(.@"examples/minram"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; -const Router = Server.Router; pub fn main() !void { const host: []const u8 = "0.0.0.0"; @@ -18,6 +23,17 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); + const max_conn = 16; + + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + .size_tasks_max = max_conn, + .size_aio_jobs_max = max_conn, + .size_aio_reap_max = max_conn, + }); + defer t.deinit(); + var router = Router.init(allocator); defer router.deinit(); @@ -36,26 +52,35 @@ pub fn main() !void { .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }); + }) catch unreachable; } }.handler_fn)); - var server = Server.init(.{ - .router = &router, - .allocator = allocator, - .threading = .single, - .size_backlog = 32, - .size_connections_max = 16, - .size_connection_arena_retain = 64, - .size_completions_reap_max = 8, - .size_socket_buffer = 512, - .num_header_max = 32, - .num_captures_max = 0, - .size_request_max = 2048, - .size_request_uri_max = 256, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ + .allocator = alloc, + .size_backlog = 32, + .size_connections_max = max_conn, + .size_connection_arena_retain = 64, + .size_completions_reap_max = 8, + .size_socket_buffer = 512, + .num_header_max = 32, + .num_captures_max = 0, + .size_request_max = 2048, + .size_request_uri_max = 256, + }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig index 0a8b864..2a6bab6 100644 --- a/examples/http/multithread/main.zig +++ b/examples/http/multithread/main.zig @@ -1,13 +1,17 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/multithread"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/multithread"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; +const Server = http.Server(.plain); +const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; -const Router = Server.Router; fn hi_handler(ctx: *Context) void { const name = ctx.captures[0].string; @@ -35,7 +39,7 @@ fn hi_handler(ctx: *Context) void { .status = .@"Internal Server Error", .mime = http.Mime.HTML, .body = "Out of Memory!", - }); + }) catch unreachable; return; }; @@ -43,7 +47,7 @@ fn hi_handler(ctx: *Context) void { .status = .OK, .mime = http.Mime.HTML, .body = body, - }); + }) catch unreachable; } fn redir_handler(ctx: *Context) void { @@ -52,7 +56,7 @@ fn redir_handler(ctx: *Context) void { .status = .@"Permanent Redirect", .mime = http.Mime.HTML, .body = "", - }); + }) catch unreachable; } fn post_handler(ctx: *Context) void { @@ -62,7 +66,7 @@ fn post_handler(ctx: *Context) void { .status = .OK, .mime = http.Mime.HTML, .body = "", - }); + }) catch unreachable; } pub fn main() !void { @@ -76,6 +80,12 @@ 
pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); + var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .auto, + }); + defer t.deinit(); + var router = Router.init(allocator); defer router.deinit(); @@ -84,13 +94,20 @@ pub fn main() !void { try router.serve_route("/redirect", Route.init().get(redir_handler)); try router.serve_route("/post", Route.init().post(post_handler)); - var server = Server.init(.{ - .router = &router, - .allocator = allocator, - .threading = .auto, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ .allocator = alloc }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/sse/main.zig b/examples/http/sse/main.zig index 3518b75..885afc8 100644 --- a/examples/http/sse/main.zig +++ b/examples/http/sse/main.zig @@ -17,6 +17,9 @@ const Context = Server.Context; const Route = Server.Route; const SSE = Server.SSE; +// When using SSE, you end up leaving the various abstractions that zzz has setup for you +// and you begin programming more against the tardy runtime. + const SSEBroadcastContext = struct { sse: *SSE, channel: *Channel(usize), @@ -25,14 +28,14 @@ const SSEBroadcastContext = struct { fn sse_send(_: *Runtime, value_opt: ?*const usize, ctx: *SSEBroadcastContext) !void { if (value_opt) |value| { const data = try std.fmt.allocPrint( - ctx.sse.context.allocator, + ctx.sse.allocator, "value: {d}", .{value.*}, ); try ctx.sse.send(.{ .data = data }, ctx, sse_recv); } else { - const broadcast = ctx.sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + const broadcast = ctx.sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); broadcast.unsubscribe(ctx.channel); try ctx.sse.context.close(); } @@ -43,7 +46,7 @@ fn sse_recv(_: *Runtime, success: bool, ctx: *SSEBroadcastContext) !void { try ctx.channel.recv(ctx, sse_send); } else { log.debug("channel closed", .{}); - const broadcast = ctx.sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + const broadcast = ctx.sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); broadcast.unsubscribe(ctx.channel); } } @@ -51,13 +54,13 @@ fn sse_recv(_: *Runtime, success: bool, ctx: *SSEBroadcastContext) !void { fn sse_init(rt: *Runtime, success: bool, sse: *SSE) !void { if (!success) { // on failure, it'll auto close after - // the sse task. + // the sse initialization task runs.
log.err("sse initalization failed", .{}); return; } - const broadcast = sse.context.runtime.storage.get_ptr("broadcast", Broadcast(usize)); - const context = try sse.context.allocator.create(SSEBroadcastContext); + const broadcast = sse.runtime.storage.get_ptr("broadcast", Broadcast(usize)); + const context = try sse.allocator.create(SSEBroadcastContext); context.* = .{ .sse = sse, .channel = try broadcast.subscribe(rt, 10) }; try context.channel.recv(context, sse_send); } diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig index 219255b..ce72070 100644 --- a/examples/http/tls/main.zig +++ b/examples/http/tls/main.zig @@ -1,16 +1,19 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/tls"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/tls"); -const Server = http.Server(.{ - .tls = .{ - .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, - .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, - .cert_name = "CERTIFICATE", - .key_name = "EC PRIVATE KEY", - }, -}, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.{ .tls = .{ + .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, + .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, + .cert_name = "CERTIFICATE", + .key_name = "EC PRIVATE KEY", +} }); const Context = Server.Context; const Route = Server.Route; @@ -49,27 +52,36 @@ pub fn main() !void { .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }); + }) catch unreachable; } }.handler_fn)); try router.serve_route("/kill", Route.init().get(struct { pub fn handler_fn(ctx: *Context) void { - ctx.respond(.{ - .status = .Kill, - .mime = http.Mime.HTML, - .body = "", - }); + ctx.runtime.stop(); } }.handler_fn)); - var server = Server.init(.{ - .router = &router, + var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, }); - defer server.deinit(); + defer t.deinit(); - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ .allocator = alloc }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig index 5d6d3a6..ae5aae2 100644 --- a/examples/http/valgrind/main.zig +++ b/examples/http/valgrind/main.zig @@ -1,12 +1,16 @@ const std = @import("std"); +const log = std.log.scoped(.@"examples/valgrind"); const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/valgrind"); -const Server = http.Server(.plain, .auto); +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; -const Router = Server.Router; pub fn main() !void { const host: []const u8 = "0.0.0.0"; @@ -34,27 +38,36 @@ pub fn main() !void { .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }); + }) catch unreachable; } }.handler_fn)); try router.serve_route("/kill", Route.init().get(struct { pub fn handler_fn(ctx: *Context) void { - ctx.respond(.{ - .status = .Kill, - .mime = http.Mime.HTML, - 
.body = "", - }); + ctx.runtime.stop(); } }.handler_fn)); - var server = http.Server(.plain, .auto).init(.{ - .router = &router, + var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, }); - defer server.deinit(); + defer t.deinit(); - try server.bind(host, port); - try server.listen(); + try t.entry( + struct { + fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + var server = Server.init(.{ .allocator = alloc }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + &router, + struct { + fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + Server.clean(rt) catch unreachable; + } + }.exit, + {}, + ); } diff --git a/src/http/context.zig b/src/http/context.zig index bd2ea3a..b4a9fa1 100644 --- a/src/http/context.zig +++ b/src/http/context.zig @@ -57,7 +57,11 @@ pub fn Context(comptime Server: type) type { ); const sse = try self.allocator.create(SSE); - sse.* = .{ .context = self }; + sse.* = .{ + .context = self, + .runtime = self.runtime, + .allocator = self.allocator, + }; const pslice = Pseudoslice.init(headers, "", self.provision.buffer); diff --git a/src/http/router.zig b/src/http/router.zig index d6efb1a..8ebfb89 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -48,18 +48,15 @@ pub fn Router(comptime Server: type) type { buffer: []u8, }; - fn open_file_task(rt: *Runtime, t: *const Task, provision: *FileProvision) !void { - errdefer { - provision.context.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - } + fn open_file_task(rt: *Runtime, fd: std.posix.fd_t, provision: *FileProvision) !void { + errdefer provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }) catch unreachable; - const fd = t.result.?.fd; if (fd <= -1) { - provision.context.respond(.{ + try provision.context.respond(.{ .status = .@"Not Found", .mime = Mime.HTML, .body = "File Not Found", @@ -69,31 +66,26 @@ pub fn Router(comptime Server: type) type { provision.fd = fd; try rt.fs.read( - *FileProvision, - read_file_task, provision, + read_file_task, fd, provision.buffer, 0, ); } - fn read_file_task(rt: *Runtime, t: *const Task, provision: *FileProvision) !void { - errdefer { - provision.context.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - } + fn read_file_task(rt: *Runtime, result: i32, provision: *FileProvision) !void { + errdefer provision.context.respond(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }) catch unreachable; - const result: i32 = t.result.?.value; if (result <= 0) { // If we are done reading... 
try rt.fs.close( - *FileProvision, - close_file_task, provision, + close_file_task, provision.fd, ); return; @@ -117,17 +109,16 @@ pub fn Router(comptime Server: type) type { provision.offset += length; try rt.fs.read( - *FileProvision, - read_file_task, provision, + read_file_task, provision.fd, provision.buffer, provision.offset, ); } - fn close_file_task(_: *Runtime, _: *const Task, provision: *FileProvision) !void { - provision.context.respond(.{ + fn close_file_task(_: *Runtime, _: void, provision: *FileProvision) !void { + try provision.context.respond(.{ .status = .OK, .mime = provision.mime, .body = provision.list.items[0..], @@ -146,7 +137,7 @@ pub fn Router(comptime Server: type) type { .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", - }); + }) catch unreachable; return; }; @@ -167,7 +158,7 @@ pub fn Router(comptime Server: type) type { .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", - }); + }) catch unreachable; return; }; @@ -183,16 +174,15 @@ pub fn Router(comptime Server: type) type { // We also need to support chunked encoding. // It makes a lot more sense for files atleast. ctx.runtime.fs.open( - *FileProvision, - open_file_task, provision, + open_file_task, file_path, ) catch { ctx.respond(.{ .status = .@"Internal Server Error", .mime = Mime.HTML, .body = "", - }); + }) catch unreachable; return; }; } diff --git a/src/http/server.zig b/src/http/server.zig index 0f9c24b..eb6b04d 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -308,14 +308,14 @@ pub fn Server(comptime security: Security) type { pub fn close_task(rt: *Runtime, _: void, provision: *Provision) !void { assert(provision.job == .close); - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const config = rt.storage.get_const_ptr("config", ServerConfig); + const server_socket = rt.storage.get("__zzz_server_socket", std.posix.socket_t); + const pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); log.info("{d} - closing connection", .{provision.index}); if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); @@ -337,7 +337,7 @@ pub fn Server(comptime security: Security) type { pool.release(provision.index); - const accept_queued = rt.storage.get_ptr("accept_queued", bool); + const accept_queued = rt.storage.get_ptr("__zzz_accept_queued", bool); if (!accept_queued.*) { accept_queued.* = true; try rt.net.accept( @@ -349,8 +349,8 @@ pub fn Server(comptime security: Security) type { } fn accept_task(rt: *Runtime, child_socket: std.posix.socket_t, socket: std.posix.socket_t) !void { - const pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); - const accept_queued = rt.storage.get_ptr("accept_queued", bool); + const pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); + const accept_queued = rt.storage.get_ptr("__zzz_accept_queued", bool); accept_queued.* = false; if (rt.scheduler.tasks.clean() >= 2) { @@ -386,8 +386,8 @@ pub fn Server(comptime security: Security) type { switch (comptime security) { .tls => |_| { - const tls_ctx = rt.storage.get_const_ptr("tls_ctx", TLSContextType); - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_ctx = 
rt.storage.get_const_ptr("__zzz_tls_ctx", TLSContextType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* == null); @@ -418,8 +418,8 @@ pub fn Server(comptime security: Security) type { fn recv_task(rt: *Runtime, length: i32, provision: *Provision) !void { assert(provision.job == .recv); - const config = rt.storage.get_const_ptr("config", ServerConfig); - const router = rt.storage.get_const_ptr("router", Router); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); + const router = rt.storage.get_const_ptr("__zzz_router", Router); const recv_job = &provision.job.recv; @@ -439,7 +439,7 @@ pub fn Server(comptime security: Security) type { const recv_buffer = blk: { switch (comptime security) { .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); @@ -672,7 +672,7 @@ pub fn Server(comptime security: Security) type { switch (comptime security) { .tls => |_| { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); @@ -729,7 +729,7 @@ pub fn Server(comptime security: Security) type { fn handshake_task(rt: *Runtime, length: i32, provision: *Provision) !void { assert(security == .tls); if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); assert(provision.job == .handshake); const handshake_job = &provision.job.handshake; @@ -761,7 +761,7 @@ pub fn Server(comptime security: Security) type { } catch |e| { log.err("{d} - tls handshake failed={any}", .{ provision.index, e }); provision.job = .close; - try rt.net.close(*Provision, close_task, provision, provision.socket); + try rt.net.close(provision, close_task, provision.socket); return error.TLSHandshakeRecvFailed; }; @@ -787,12 +787,12 @@ pub fn Server(comptime security: Security) type { /// Prepares the provision send_job and returns the first send chunk pub fn prepare_send(rt: *Runtime, provision: *Provision, after: AfterType, pslice: Pseudoslice) ![]const u8 { - const config = rt.storage.get_const_ptr("config", ServerConfig); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); const plain_buffer = pslice.get(0, config.size_socket_buffer); switch (comptime security) { .tls => { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); @@ -857,7 +857,7 @@ pub fn Server(comptime security: Security) type { return; } - const config = rt.storage.get_const_ptr("config", ServerConfig); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); log.debug("{d} - queueing a new recv", .{provision.index}); _ = provision.arena.reset(.{ @@ -879,7 +879,7 @@ pub fn Server(comptime security: Security) type { return struct { fn send_then_inner(rt: *Runtime, length: i32, provision: *Provision) !void { assert(provision.job == .send); - const config = rt.storage.get_const_ptr("config", ServerConfig); + const config = rt.storage.get_const_ptr("__zzz_config", ServerConfig); // If the socket is closed. 
if (length <= 0) { @@ -897,7 +897,7 @@ pub fn Server(comptime security: Security) type { .tls => { assert(send_job.security == .tls); - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); const job_tls = &send_job.security.tls; job_tls.encrypted_count += send_count; @@ -1009,12 +1009,12 @@ pub fn Server(comptime security: Security) type { self.config, ); - try rt.storage.store_ptr("router", @constCast(router)); - try rt.storage.store_ptr("provision_pool", provision_pool); - try rt.storage.store_alloc("config", self.config); + try rt.storage.store_ptr("__zzz_router", @constCast(router)); + try rt.storage.store_ptr("__zzz_provision_pool", provision_pool); + try rt.storage.store_alloc("__zzz_config", self.config); if (comptime security == .tls) { - const tls_slice = try rt.allocator( + const tls_slice = try rt.allocator.alloc( TLSType, self.config.size_connections_max, ); @@ -1025,29 +1025,29 @@ pub fn Server(comptime security: Security) type { } // since slices are fat pointers... - try rt.storage.store_alloc("tls_slice", tls_slice); - try rt.storage.store_ptr("tls_ctx", &self.tls_ctx); + try rt.storage.store_alloc("__zzz_tls_slice", tls_slice); + try rt.storage.store_alloc("__zzz_tls_ctx", self.tls_ctx); } - try rt.storage.store_alloc("server_socket", socket); - try rt.storage.store_alloc("accept_queued", true); + try rt.storage.store_alloc("__zzz_server_socket", socket); + try rt.storage.store_alloc("__zzz_accept_queued", true); try rt.net.accept(socket, accept_task, socket); } pub inline fn clean(rt: *Runtime) !void { // clean up socket. - const server_socket = rt.storage.get("server_socket", std.posix.socket_t); + const server_socket = rt.storage.get("__zzz_server_socket", std.posix.socket_t); std.posix.close(server_socket); // clean up provision pool. - const provision_pool = rt.storage.get_ptr("provision_pool", Pool(Provision)); + const provision_pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); provision_pool.deinit(Provision.deinit_hook, rt.allocator); rt.allocator.destroy(provision_pool); // clean up TLS. 
if (comptime security == .tls) { - const tls_slice = rt.storage.get("tls_slice", []TLSType); + const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); rt.allocator.free(tls_slice); } } diff --git a/src/http/sse.zig b/src/http/sse.zig index 216706c..24abf9d 100644 --- a/src/http/sse.zig +++ b/src/http/sse.zig @@ -20,6 +20,8 @@ pub fn SSE(comptime Server: type) type { return struct { const Self = @This(); context: *Context, + allocator: std.mem.Allocator, + runtime: *Runtime, pub fn send( self: *Self, From 8622c844447b4ed153d28120ab9755d539c07ae0 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sat, 2 Nov 2024 15:46:54 -0700 Subject: [PATCH 10/17] feat(route): pass data directly into route --- examples/http/basic/main.zig | 24 ++++---- examples/http/benchmark/main.zig | 29 +++------- examples/http/fs/main.zig | 10 ++-- examples/http/minram/main.zig | 8 +-- examples/http/multithread/main.zig | 40 ++++++-------- examples/http/sse/main.zig | 25 ++++----- examples/http/tls/main.zig | 12 ++-- examples/http/valgrind/main.zig | 12 ++-- src/http/route.zig | 88 +++++++++++++++++------------- src/http/router.zig | 22 ++++---- src/http/server.zig | 14 ++++- 11 files changed, 141 insertions(+), 143 deletions(-) diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index d8eb98f..b973175 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -16,7 +16,6 @@ const Route = Server.Route; pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - const max_conn = 512; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = gpa.allocator(); @@ -27,34 +26,36 @@ pub fn main() !void { var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, - .size_tasks_max = max_conn, - .size_aio_jobs_max = max_conn, - .size_aio_reap_max = max_conn, }); defer t.deinit(); var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { - const body = + const num: i8 = 12; + + try router.serve_route("/", Route.init().get(&num, struct { + pub fn handler_fn(ctx: *Context, id: *const i8) !void { + const body_fmt = \\ \\ \\ \\

Hello, World!

+ \\

id: {d}

\\ \\ ; + const body = try std.fmt.allocPrint(ctx.allocator, body_fmt, .{id.*}); + // This is the standard response and what you // will usually be using. This will send to the // client and then continue to await more requests. - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }) catch unreachable; + }); } }.handler_fn)); @@ -63,10 +64,7 @@ pub fn main() !void { try t.entry( struct { fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ - .allocator = alloc, - .size_connections_max = max_conn, - }); + var server = Server.init(.{ .allocator = alloc }); try server.bind(host, port); try server.serve(r, rt); } diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index f789861..f0d5a9d 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -17,10 +17,10 @@ pub const std_options = .{ .log_level = .err, }; -fn hi_handler(ctx: *Context) void { +fn hi_handler(ctx: *Context, _: void) !void { const name = ctx.captures[0].string; - const body = std.fmt.allocPrint(ctx.allocator, + const body = try std.fmt.allocPrint(ctx.allocator, \\ \\ \\ @@ -37,26 +37,18 @@ fn hi_handler(ctx: *Context) void { \\ \\ \\ - , .{name}) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Out of Memory!", - }) catch unreachable; - return; - }; + , .{name}); - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body, - }) catch unreachable; + }); } pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - const max_conn = 1024; var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){}; const allocator = gpa.allocator(); @@ -65,25 +57,18 @@ pub fn main() !void { var t = try Tardy.init(.{ .allocator = allocator, .threading = .auto, - .size_tasks_max = max_conn, - .size_aio_jobs_max = max_conn, - .size_aio_reap_max = max_conn, }); defer t.deinit(); var router = Router.init(allocator); defer router.deinit(); try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/hi/%s", Route.init().get(hi_handler)); + try router.serve_route("/hi/%s", Route.init().get({}, hi_handler)); try t.entry( struct { fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ - .allocator = alloc, - .size_connections_max = max_conn, - }); - + var server = Server.init(.{ .allocator = alloc }); try server.bind(host, port); try server.serve(r, rt); } diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig index 72667f4..d726e72 100644 --- a/examples/http/fs/main.zig +++ b/examples/http/fs/main.zig @@ -32,8 +32,8 @@ pub fn main() !void { var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -43,16 +43,16 @@ pub fn main() !void { \\ ; - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }) catch unreachable; + }); } }.handler_fn)); try router.serve_route("/kill", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + pub fn handler_fn(ctx: *Context, _: void) !void { ctx.runtime.stop(); } }.handler_fn)); diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig index 
b515785..bf2c53d 100644 --- a/examples/http/minram/main.zig +++ b/examples/http/minram/main.zig @@ -37,8 +37,8 @@ pub fn main() !void { var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -48,11 +48,11 @@ pub fn main() !void { \\ ; - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }) catch unreachable; + }); } }.handler_fn)); diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig index 2a6bab6..8ce90bd 100644 --- a/examples/http/multithread/main.zig +++ b/examples/http/multithread/main.zig @@ -13,11 +13,11 @@ const Router = Server.Router; const Context = Server.Context; const Route = Server.Route; -fn hi_handler(ctx: *Context) void { +fn hi_handler(ctx: *Context, _: void) !void { const name = ctx.captures[0].string; const greeting = ctx.queries.get("greeting") orelse "Hi"; - const body = std.fmt.allocPrint(ctx.allocator, + const body = try std.fmt.allocPrint(ctx.allocator, \\ \\ \\ @@ -34,39 +34,33 @@ fn hi_handler(ctx: *Context) void { \\ \\ \\ - , .{ greeting, name }) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = http.Mime.HTML, - .body = "Out of Memory!", - }) catch unreachable; - return; - }; - - ctx.respond(.{ + , .{ greeting, name }); + + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body, - }) catch unreachable; + }); } -fn redir_handler(ctx: *Context) void { - ctx.response.headers.add("Location", "/hi/redirect") catch unreachable; - ctx.respond(.{ +fn redir_handler(ctx: *Context, _: void) !void { + try ctx.response.headers.add("Location", "/hi/redirect"); + + try ctx.respond(.{ .status = .@"Permanent Redirect", .mime = http.Mime.HTML, .body = "", - }) catch unreachable; + }); } -fn post_handler(ctx: *Context) void { +fn post_handler(ctx: *Context, _: void) !void { log.debug("Body: {s}", .{ctx.request.body}); - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = "", - }) catch unreachable; + }); } pub fn main() !void { @@ -90,9 +84,9 @@ pub fn main() !void { defer router.deinit(); try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/hi/%s", Route.init().get(hi_handler)); - try router.serve_route("/redirect", Route.init().get(redir_handler)); - try router.serve_route("/post", Route.init().post(post_handler)); + try router.serve_route("/hi/%s", Route.init().get({}, hi_handler)); + try router.serve_route("/redirect", Route.init().get({}, redir_handler)); + try router.serve_route("/post", Route.init().post({}, post_handler)); try t.entry( struct { diff --git a/examples/http/sse/main.zig b/examples/http/sse/main.zig index 885afc8..4d6bbf0 100644 --- a/examples/http/sse/main.zig +++ b/examples/http/sse/main.zig @@ -65,23 +65,22 @@ fn sse_init(rt: *Runtime, success: bool, sse: *SSE) !void { try context.channel.recv(context, sse_send); } -fn sse_handler(ctx: *Context) void { +fn sse_handler(ctx: *Context, _: void) !void { log.debug("going into sse mode", .{}); - ctx.to_sse(sse_init) catch unreachable; + try ctx.to_sse(sse_init); } -fn msg_handler(ctx: *Context) void { +fn msg_handler(ctx: *Context, broadcast: *Broadcast(usize)) !void { log.debug("message handler", .{}); - const broadcast = ctx.runtime.storage.get_ptr("broadcast", Broadcast(usize)); 
- broadcast.send(0) catch unreachable; - ctx.respond(.{ + try broadcast.send(0); + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = "", - }) catch unreachable; + }); } -fn kill_handler(ctx: *Context) void { +fn kill_handler(ctx: *Context, _: void) !void { ctx.runtime.stop(); } @@ -106,14 +105,14 @@ pub fn main() !void { var router = Router.init(allocator); defer router.deinit(); - try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); - try router.serve_route("/kill", Route.init().get(kill_handler)); - try router.serve_route("/stream", Route.init().get(sse_handler)); - try router.serve_route("/message", Route.init().post(msg_handler)); - var broadcast = try Broadcast(usize).init(allocator, max_conn); defer broadcast.deinit(); + try router.serve_embedded_file("/", http.Mime.HTML, @embedFile("index.html")); + try router.serve_route("/kill", Route.init().get({}, kill_handler)); + try router.serve_route("/stream", Route.init().get({}, sse_handler)); + try router.serve_route("/message", Route.init().post(&broadcast, msg_handler)); + const EntryParams = struct { router: *const Router, broadcast: *Broadcast(usize), diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig index ce72070..b0684f2 100644 --- a/examples/http/tls/main.zig +++ b/examples/http/tls/main.zig @@ -34,8 +34,8 @@ pub fn main() !void { try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); - try router.serve_route("/", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) void { const body = \\ \\ @@ -48,16 +48,16 @@ pub fn main() !void { \\ ; - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }) catch unreachable; + }); } }.handler_fn)); - try router.serve_route("/kill", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/kill", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { ctx.runtime.stop(); } }.handler_fn)); diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig index ae5aae2..638b8b0 100644 --- a/examples/http/valgrind/main.zig +++ b/examples/http/valgrind/main.zig @@ -23,8 +23,8 @@ pub fn main() !void { var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -34,16 +34,16 @@ pub fn main() !void { \\ ; - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], - }) catch unreachable; + }); } }.handler_fn)); - try router.serve_route("/kill", Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + try router.serve_route("/kill", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { ctx.runtime.stop(); } }.handler_fn)); diff --git a/src/http/route.zig b/src/http/route.zig index d00d09b..4b46e4f 100644 --- a/src/http/route.zig +++ b/src/http/route.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/route"); const Method = @import("method.zig").Method; const Request = @import("request.zig").Request; @@ -9,8 +10,16 @@ const Context = @import("context.zig").Context; pub fn Route(comptime 
Server: type) type { return struct { const Self = @This(); - pub const HandlerFn = *const fn (context: *Context(Server)) void; - handlers: [9]?HandlerFn = [_]?HandlerFn{null} ** 9, + pub const HandlerFn = *const fn (context: *Context(Server), data: *const anyopaque) anyerror!void; + fn TypedHandlerFn(comptime T: type) type { + return *const fn (context: *Context(Server), data: T) anyerror!void; + } + const HandlerWithData = struct { + handler: HandlerFn, + data: *anyopaque, + }; + + handlers: [9]?HandlerWithData = [_]?HandlerWithData{null} ** 9, fn method_to_index(method: Method) u32 { return switch (method) { @@ -27,7 +36,7 @@ pub fn Route(comptime Server: type) type { } pub fn init() Self { - return Self{ .handlers = [_]?HandlerFn{null} ** 9 }; + return Self{ .handlers = [_]?HandlerWithData{null} ** 9 }; } /// Returns a comma delimited list of allowed Methods for this route. This @@ -58,62 +67,65 @@ pub fn Route(comptime Server: type) type { } } - pub fn get_handler(self: Self, method: Method) ?HandlerFn { + pub fn get_handler(self: Self, method: Method) ?HandlerWithData { return self.handlers[method_to_index(method)]; } - pub fn get(self: Self, handler_fn: HandlerFn) Self { + inline fn inner_route( + comptime method: Method, + self: Self, + data: anytype, + handler_fn: TypedHandlerFn(@TypeOf(data)), + ) Self { + // You can either give a void (if you don't want to pass data through) or a pointer. + comptime assert(@typeInfo(@TypeOf(data)) == .Pointer or @typeInfo(@TypeOf(data)) == .Void); + const inner_data = switch (comptime @typeInfo(@TypeOf(data))) { + .Void => @constCast(&data), + .Pointer => data, + else => unreachable, + }; var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.GET)] = handler_fn; + new_handlers[comptime method_to_index(method)] = .{ + .handler = @ptrCast(handler_fn), + .data = inner_data, + }; return Self{ .handlers = new_handlers }; } - pub fn head(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.HEAD)] = handler_fn; - return Self{ .handlers = new_handlers }; + pub fn get(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.GET, self, data, handler_fn); } - pub fn post(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.POST)] = handler_fn; - return Self{ .handlers = new_handlers }; + pub fn head(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.HEAD, self, data, handler_fn); } - pub fn put(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PUT)] = handler_fn; + pub fn post(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.POST, self, data, handler_fn); } - pub fn delete(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.DELETE)] = handler_fn; + pub fn put(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.PUT, self, data, handler_fn); } - pub fn connect(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.CONNECT)] = handler_fn; + pub fn delete(self: Self, data: anytype, handler_fn: 
TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.DELETE, self, data, handler_fn); } - pub fn options(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.OPTIONS)] = handler_fn; - return Self{ .handlers = new_handlers }; + pub fn connect(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.CONNECT, self, data, handler_fn); } - pub fn trace(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.TRACE)] = handler_fn; - return Self{ .handlers = new_handlers }; + pub fn options(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.OPTIONS, self, data, handler_fn); } - pub fn patch(self: Self, handler_fn: HandlerFn) Self { - var new_handlers = self.handlers; - new_handlers[comptime method_to_index(.PATCH)] = handler_fn; - return Self{ .handlers = new_handlers }; + pub fn trace(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.TRACE, self, data, handler_fn); + } + + pub fn patch(self: Self, data: anytype, handler_fn: TypedHandlerFn(@TypeOf(data))) Self { + return inner_route(.PATCH, self, data, handler_fn); } }; } diff --git a/src/http/router.zig b/src/http/router.zig index 8ebfb89..b945b94 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -203,20 +203,20 @@ pub fn Router(comptime Server: type) type { comptime bytes: []const u8, ) !void { assert(!self.locked); - const route = Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + const route = Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { if (comptime builtin.mode == .Debug) { // Don't Cache in Debug. - ctx.response.headers.add( + try ctx.response.headers.add( "Cache-Control", "no-cache", - ) catch unreachable; + ); } else { // Cache for 30 days. 
- ctx.response.headers.add( + try ctx.response.headers.add( "Cache-Control", comptime std.fmt.comptimePrint("max-age={d}", .{std.time.s_per_day * 30}), - ) catch unreachable; + ); } // If our static item is greater than 1KB, @@ -224,26 +224,26 @@ pub fn Router(comptime Server: type) type { if (comptime bytes.len > 1024) { @setEvalBranchQuota(1_000_000); const etag = comptime std.fmt.comptimePrint("\"{d}\"", .{std.hash.Wyhash.hash(0, bytes)}); - ctx.response.headers.add("ETag", etag[0..]) catch unreachable; + try ctx.response.headers.add("ETag", etag[0..]); if (ctx.request.headers.get("If-None-Match")) |match| { if (std.mem.eql(u8, etag, match)) { - ctx.respond(.{ + try ctx.respond(.{ .status = .@"Not Modified", .mime = Mime.HTML, .body = "", - }) catch unreachable; + }); return; } } } - ctx.respond(.{ + try ctx.respond(.{ .status = .OK, .mime = mime, .body = bytes, - }) catch unreachable; + }); } }.handler_fn); diff --git a/src/http/server.zig b/src/http/server.zig index eb6b04d..3e37007 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -163,7 +163,7 @@ pub fn Server(comptime security: Security) type { if (found) |f| { const handler = f.route.get_handler(p.request.method); - if (handler) |func| { + if (handler) |h_with_data| { const context: *Context = try p.arena.allocator().create(Context); context.* = .{ .allocator = p.arena.allocator(), @@ -176,7 +176,17 @@ pub fn Server(comptime security: Security) type { .provision = p, }; - @call(.auto, func, .{context}); + @call(.auto, h_with_data.handler, .{ context, h_with_data.data }) catch |e| { + log.err("\"{s}\" handler failed with error: {}", .{ p.request.uri, e }); + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + return try raw_respond(p); + }; + return .spawned; } else { // If we match the route but not the method. From bf9060ea12327485d4db093067c4a0d182127444 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 3 Nov 2024 01:57:03 -0700 Subject: [PATCH 11/17] feat(tardy): use stronger typing --- examples/http/basic/main.zig | 12 +++++------ examples/http/benchmark/main.zig | 12 +++++------ examples/http/fs/main.zig | 12 +++++------ examples/http/minram/main.zig | 12 +++++------ examples/http/multithread/main.zig | 12 +++++------ examples/http/sse/main.zig | 12 +++++------ examples/http/tls/main.zig | 14 ++++++------- examples/http/valgrind/main.zig | 12 +++++------ src/http/provision.zig | 4 ++-- src/http/route.zig | 8 ++++---- src/http/router.zig | 33 ++++++------------------------ src/http/server.zig | 10 +++++---- 12 files changed, 67 insertions(+), 86 deletions(-) diff --git a/examples/http/basic/main.zig b/examples/http/basic/main.zig index b973175..2089e66 100644 --- a/examples/http/basic/main.zig +++ b/examples/http/basic/main.zig @@ -62,19 +62,19 @@ pub fn main() !void { // This provides the entry function into the Tardy runtime. This will run // exactly once inside of each runtime (each thread gets a single runtime). 
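    // With the reworked entry API, the user parameter is passed to `t.entry`
    // first and handed straight to the entry function as `(rt, param)`, with
    // allocations going through `rt.allocator`. A rough sketch of bundling
    // several values into one parameter, following the shape of the sse
    // example later in this patch (the `EntryParams` and `Broadcast` names are
    // assumed from that example, not defined here):
    //
    //     const EntryParams = struct { router: *const Router, broadcast: *Broadcast };
    //
    //     try t.entry(
    //         EntryParams{ .router = &router, .broadcast = &broadcast },
    //         struct {
    //             fn entry(rt: *Runtime, params: EntryParams) !void {
    //                 try rt.storage.store_ptr("broadcast", params.broadcast);
    //                 var server = Server.init(.{ .allocator = rt.allocator });
    //                 try server.bind(host, port);
    //                 try server.serve(params.router, rt);
    //             }
    //         }.entry,
    //         {},
    //         struct {
    //             fn exit(rt: *Runtime, _: void) !void {
    //                 try Server.clean(rt);
    //             }
    //         }.exit,
    //     );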
try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/benchmark/main.zig b/examples/http/benchmark/main.zig index f0d5a9d..573bc4a 100644 --- a/examples/http/benchmark/main.zig +++ b/examples/http/benchmark/main.zig @@ -66,19 +66,19 @@ pub fn main() !void { try router.serve_route("/hi/%s", Route.init().get({}, hi_handler)); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/fs/main.zig b/examples/http/fs/main.zig index d726e72..36d067c 100644 --- a/examples/http/fs/main.zig +++ b/examples/http/fs/main.zig @@ -51,7 +51,7 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", Route.init().get(struct { + try router.serve_route("/kill", Route.init().get({}, struct { pub fn handler_fn(ctx: *Context, _: void) !void { ctx.runtime.stop(); } @@ -60,19 +60,19 @@ pub fn main() !void { try router.serve_fs_dir("/static", "./examples/http/fs/static"); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { + fn exit(rt: *Runtime, _: void) !void { Server.clean(rt) catch unreachable; } }.exit, - {}, ); } diff --git a/examples/http/minram/main.zig b/examples/http/minram/main.zig index bf2c53d..2d43a6f 100644 --- a/examples/http/minram/main.zig +++ b/examples/http/minram/main.zig @@ -57,10 +57,11 @@ pub fn main() !void { }.handler_fn)); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { + fn entry(rt: *Runtime, r: *const Router) !void { var server = Server.init(.{ - .allocator = alloc, + .allocator = rt.allocator, .size_backlog = 32, .size_connections_max = max_conn, .size_connection_arena_retain = 64, @@ -75,12 +76,11 @@ pub fn main() !void { try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/multithread/main.zig b/examples/http/multithread/main.zig index 8ce90bd..fa87c1f 100644 --- a/examples/http/multithread/main.zig +++ b/examples/http/multithread/main.zig @@ -89,19 +89,19 @@ pub fn main() !void { try 
router.serve_route("/post", Route.init().post({}, post_handler)); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/sse/main.zig b/examples/http/sse/main.zig index 4d6bbf0..8a9c767 100644 --- a/examples/http/sse/main.zig +++ b/examples/http/sse/main.zig @@ -119,12 +119,13 @@ pub fn main() !void { }; try t.entry( + EntryParams{ .router = &router, .broadcast = &broadcast }, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, params: EntryParams) !void { + fn entry(rt: *Runtime, params: EntryParams) !void { try rt.storage.store_ptr("broadcast", params.broadcast); var server = Server.init(.{ - .allocator = alloc, + .allocator = rt.allocator, .size_connections_max = max_conn, }); @@ -132,12 +133,11 @@ pub fn main() !void { try server.serve(params.router, rt); } }.entry, - EntryParams{ .router = &router, .broadcast = &broadcast }, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/tls/main.zig b/examples/http/tls/main.zig index b0684f2..9c4e786 100644 --- a/examples/http/tls/main.zig +++ b/examples/http/tls/main.zig @@ -35,7 +35,7 @@ pub fn main() !void { try router.serve_embedded_file("/embed/pico.min.css", http.Mime.CSS, @embedFile("embed/pico.min.css")); try router.serve_route("/", Route.init().get({}, struct { - pub fn handler_fn(ctx: *Context, _: void) void { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -69,19 +69,19 @@ pub fn main() !void { defer t.deinit(); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/examples/http/valgrind/main.zig b/examples/http/valgrind/main.zig index 638b8b0..4d6bbf5 100644 --- a/examples/http/valgrind/main.zig +++ b/examples/http/valgrind/main.zig @@ -55,19 +55,19 @@ pub fn main() !void { defer t.deinit(); try t.entry( + &router, struct { - fn entry(rt: *Runtime, alloc: std.mem.Allocator, r: *const Router) !void { - var server = Server.init(.{ .allocator = alloc }); + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); try server.bind(host, port); try server.serve(r, rt); } }.entry, - &router, + {}, struct { - fn exit(rt: *Runtime, _: std.mem.Allocator, _: void) void { - Server.clean(rt) catch unreachable; + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); } }.exit, - {}, ); } diff --git a/src/http/provision.zig b/src/http/provision.zig index 7802468..995006c 
100644 --- a/src/http/provision.zig +++ b/src/http/provision.zig @@ -21,7 +21,7 @@ pub const Provision = struct { response: Response, stage: Stage, - pub fn init_hook(provisions: []Provision, config: anytype) void { + pub fn init_hook(provisions: []Provision, config: ServerConfig) void { for (provisions) |*provision| { provision.job = .empty; provision.socket = undefined; @@ -52,7 +52,7 @@ pub const Provision = struct { } } - pub fn deinit_hook(provisions: []Provision, allocator: anytype) void { + pub fn deinit_hook(provisions: []Provision, allocator: std.mem.Allocator) void { for (provisions) |*provision| { allocator.free(provision.buffer); provision.recv_buffer.deinit(); diff --git a/src/http/route.zig b/src/http/route.zig index 4b46e4f..26f41bc 100644 --- a/src/http/route.zig +++ b/src/http/route.zig @@ -10,13 +10,13 @@ const Context = @import("context.zig").Context; pub fn Route(comptime Server: type) type { return struct { const Self = @This(); - pub const HandlerFn = *const fn (context: *Context(Server), data: *const anyopaque) anyerror!void; + pub const HandlerFn = *const fn (context: *Context(Server), data: *anyopaque) anyerror!void; fn TypedHandlerFn(comptime T: type) type { return *const fn (context: *Context(Server), data: T) anyerror!void; } const HandlerWithData = struct { handler: HandlerFn, - data: *anyopaque, + data: usize, }; handlers: [9]?HandlerWithData = [_]?HandlerWithData{null} ** 9, @@ -80,8 +80,8 @@ pub fn Route(comptime Server: type) type { // You can either give a void (if you don't want to pass data through) or a pointer. comptime assert(@typeInfo(@TypeOf(data)) == .Pointer or @typeInfo(@TypeOf(data)) == .Void); const inner_data = switch (comptime @typeInfo(@TypeOf(data))) { - .Void => @constCast(&data), - .Pointer => data, + .Void => @intFromPtr(&data), + .Pointer => @intFromPtr(data), else => unreachable, }; var new_handlers = self.handlers; diff --git a/src/http/router.zig b/src/http/router.zig index b945b94..c1f59da 100644 --- a/src/http/router.zig +++ b/src/http/router.zig @@ -128,18 +128,11 @@ pub fn Router(comptime Server: type) type { pub fn serve_fs_dir(self: *Self, comptime url_path: []const u8, comptime dir_path: []const u8) !void { assert(!self.locked); - const route = Route.init().get(struct { - pub fn handler_fn(ctx: *Context) void { + const route = Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const search_path = ctx.captures[0].remaining; - const file_path = std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }) catch unreachable; - return; - }; + const file_path = try std.fmt.allocPrintZ(ctx.allocator, "{s}/{s}", .{ dir_path, search_path }); // TODO: Ensure that paths cannot go out of scope and reference data that they shouldn't be allowed to. // Very important. @@ -153,14 +146,7 @@ pub fn Router(comptime Server: type) type { } }; - const provision = ctx.allocator.create(FileProvision) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }) catch unreachable; - return; - }; + const provision = try ctx.allocator.create(FileProvision); provision.* = .{ .mime = mime, @@ -173,18 +159,11 @@ pub fn Router(comptime Server: type) type { // We also need to support chunked encoding. // It makes a lot more sense for files atleast. 
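                // Handlers now return `anyerror!void`, so fallible steps here can just
                // use `try`: if the open below (or the allocations above) fails, the
                // server's route_and_respond catches the error, logs it, and answers
                // with 500 Internal Server Error on the handler's behalf. A minimal
                // handler in this style (reusing names from the surrounding code) is
                // roughly:
                //
                //     pub fn handler_fn(ctx: *Context, _: void) !void {
                //         const body = try std.fmt.allocPrint(ctx.allocator, "hi, {s}", .{"world"});
                //         try ctx.respond(.{ .status = .OK, .mime = Mime.HTML, .body = body });
                //     }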
- ctx.runtime.fs.open( + try ctx.runtime.fs.open( provision, open_file_task, file_path, - ) catch { - ctx.respond(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }) catch unreachable; - return; - }; + ); } }.handler_fn); diff --git a/src/http/server.zig b/src/http/server.zig index 3e37007..4036bc1 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -176,7 +176,10 @@ pub fn Server(comptime security: Security) type { .provision = p, }; - @call(.auto, h_with_data.handler, .{ context, h_with_data.data }) catch |e| { + @call(.auto, h_with_data.handler, .{ + context, + @as(*anyopaque, @ptrFromInt(h_with_data.data)), + }) catch |e| { log.err("\"{s}\" handler failed with error: {}", .{ p.request.uri, e }); p.response.set(.{ .status = .@"Internal Server Error", @@ -326,7 +329,6 @@ pub fn Server(comptime security: Security) type { if (comptime security == .tls) { const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); - const tls_ptr: *TLSType = &tls_slice[provision.index]; assert(tls_ptr.* != null); tls_ptr.*.?.deinit(); @@ -1015,8 +1017,8 @@ pub fn Server(comptime security: Security) type { provision_pool.* = try Pool(Provision).init( rt.allocator, self.config.size_connections_max, - Provision.init_hook, self.config, + Provision.init_hook, ); try rt.storage.store_ptr("__zzz_router", @constCast(router)); @@ -1052,7 +1054,7 @@ pub fn Server(comptime security: Security) type { // clean up provision pool. const provision_pool = rt.storage.get_ptr("__zzz_provision_pool", Pool(Provision)); - provision_pool.deinit(Provision.deinit_hook, rt.allocator); + provision_pool.deinit(rt.allocator, Provision.deinit_hook); rt.allocator.destroy(provision_pool); // clean up TLS. From 188ee14906644c81a59eefad2809479ec9533a81 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 3 Nov 2024 16:42:03 -0800 Subject: [PATCH 12/17] fix(server): fixing BSD resolveIp --- build.zig.zon | 5 ++--- src/http/server.zig | 8 +++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index e203715..b6ce6ed 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,9 +4,8 @@ .minimum_zig_version = "0.13.0", .dependencies = .{ .tardy = .{ - //.url = "git+https://github.com/mookums/tardy#12a2bcae25b34c4eb34ab5e3b5db101823a61cd6", - //.hash = "122073200a2412251ad1e7eb322d9d04868a1444f98bdb4d47bb630491806c8d36d4", - .path = "../tardy", + .url = "git+https://github.com/mookums/tardy?ref=context#b2c64a6088cbb373630980e736ba737e647193dd", + .hash = "12203ae553263a2f8afec10455eb58b8ea3cf5519b7fa88602a6d60fd9109e13b438", }, .bearssl = .{ .url = "https://github.com/mookums/bearssl-zig/archive/37a96eee56fe2543579bbc6da148ca886f3dd32b.tar.gz", diff --git a/src/http/server.zig b/src/http/server.zig index 4036bc1..bec92a7 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const tag = builtin.os.tag; const assert = std.debug.assert; const log = std.log.scoped(.@"zzz/http/server"); @@ -312,9 +313,10 @@ pub fn Server(comptime security: Security) type { assert(port > 0); self.addr = blk: { - switch (comptime builtin.os.tag) { - .windows => break :blk try std.net.Address.parseIp(host, port), - else => break :blk try std.net.Address.resolveIp(host, port), + if (comptime tag.isDarwin() or tag.isBSD() or tag == .windows) { + break :blk try std.net.Address.parseIp(host, port); + } else { + break :blk try std.net.Address.resolveIp(host, port); } }; } From 
61d104e9650e3e2c447e13cce87dca8dafdd6087 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Tue, 5 Nov 2024 15:35:18 -0800 Subject: [PATCH 13/17] chore(tardy): update to latest --- build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index b6ce6ed..a9d9b60 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,8 +4,8 @@ .minimum_zig_version = "0.13.0", .dependencies = .{ .tardy = .{ - .url = "git+https://github.com/mookums/tardy?ref=context#b2c64a6088cbb373630980e736ba737e647193dd", - .hash = "12203ae553263a2f8afec10455eb58b8ea3cf5519b7fa88602a6d60fd9109e13b438", + .url = "git+https://github.com/mookums/tardy?ref=main#feae50e9bf60ac13f1d5d14a7d3346fcfe442fa8", + .hash = "122093168263d66adc14bbee5de6aa0d4a2600e7299cad2b66175feeb6ce8aaef173", }, .bearssl = .{ .url = "https://github.com/mookums/bearssl-zig/archive/37a96eee56fe2543579bbc6da148ca886f3dd32b.tar.gz", From ee97b6f14eaad411f1c00d68d2286e17de0381b7 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Fri, 8 Nov 2024 16:36:00 -0800 Subject: [PATCH 14/17] feat(server): make API more consistent across req/resp --- src/http/headers.zig | 12 +- src/http/provision.zig | 11 +- src/http/request.zig | 115 +++---- src/http/response.zig | 61 +--- src/http/server.zig | 716 +++++++++++++++++++---------------------- src/http/sse.zig | 24 +- 6 files changed, 403 insertions(+), 536 deletions(-) diff --git a/src/http/headers.zig b/src/http/headers.zig index 5c778d9..3ea3e5c 100644 --- a/src/http/headers.zig +++ b/src/http/headers.zig @@ -6,16 +6,16 @@ const CaseStringMap = @import("case_string_map.zig").CaseStringMap; pub const Headers = struct { allocator: std.mem.Allocator, map: CaseStringMap([]const u8), - num_header_max: u32, + count: u32, - pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Headers { + pub fn init(allocator: std.mem.Allocator, count: u32) !Headers { var map = CaseStringMap([]const u8).init(allocator); - try map.ensureTotalCapacity(num_header_max); + try map.ensureTotalCapacity(@intCast(count)); return Headers{ .allocator = allocator, .map = map, - .num_header_max = num_header_max, + .count = count, }; } @@ -25,11 +25,11 @@ pub const Headers = struct { pub fn add(self: *Headers, key: []const u8, value: []const u8) HTTPError!void { assert(std.mem.indexOfScalar(u8, key, ':') == null); - if (self.map.count() == self.num_header_max) return HTTPError.TooManyHeaders; + if (self.map.count() == self.count) return HTTPError.TooManyHeaders; self.map.putAssumeCapacity(key, value); } - pub fn get(self: Headers, key: []const u8) ?[]const u8 { + pub fn get(self: *Headers, key: []const u8) ?[]const u8 { return self.map.get(key); } diff --git a/src/http/provision.zig b/src/http/provision.zig index 995006c..c279f24 100644 --- a/src/http/provision.zig +++ b/src/http/provision.zig @@ -40,15 +40,8 @@ pub const Provision = struct { var queries = QueryMap.init(config.allocator); queries.ensureTotalCapacity(config.num_queries_max) catch unreachable; provision.queries = queries; - - provision.request = Request.init(config.allocator, .{ - .num_header_max = config.num_header_max, - .size_request_max = config.size_request_max, - .size_request_uri_max = config.size_request_uri_max, - }) catch unreachable; - provision.response = Response.init(config.allocator, .{ - .num_headers_max = config.num_header_max, - }) catch unreachable; + provision.request = Request.init(config.allocator, config.num_header_max) catch unreachable; + provision.response = Response.init(config.allocator, 
config.num_header_max) catch unreachable; } } diff --git a/src/http/request.zig b/src/http/request.zig index d0963a8..b58c3c0 100644 --- a/src/http/request.zig +++ b/src/http/request.zig @@ -6,35 +6,21 @@ const Headers = @import("lib.zig").Headers; const HTTPError = @import("lib.zig").HTTPError; const Method = @import("lib.zig").Method; -const RequestOptions = struct { - size_request_max: u32, - size_request_uri_max: u32, - num_header_max: u32, -}; - pub const Request = struct { allocator: std.mem.Allocator, - size_request_max: u32, - size_request_uri_max: u32, method: Method, uri: []const u8, - version: std.http.Version, + version: std.http.Version = .@"HTTP/1.1", headers: Headers, body: []const u8, /// This is for constructing a Request. - pub fn init(allocator: std.mem.Allocator, options: RequestOptions) !Request { - // The request size needs to be larger than the max URI size. - assert(options.size_request_max > options.size_request_uri_max); - + pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Request { return Request{ .allocator = allocator, - .headers = try Headers.init(allocator, options.num_header_max), - .size_request_max = options.size_request_max, - .size_request_uri_max = options.size_request_uri_max, + .headers = try Headers.init(allocator, num_header_max), .method = undefined, .uri = undefined, - .version = undefined, .body = undefined, }; } @@ -43,7 +29,12 @@ pub const Request = struct { self.headers.deinit(); } - pub fn parse_headers(self: *Request, bytes: []const u8) HTTPError!void { + const RequestParseOptions = struct { + size_request_max: u32, + size_request_uri_max: u32, + }; + + pub fn parse_headers(self: *Request, bytes: []const u8, options: RequestParseOptions) HTTPError!void { self.headers.clear(); var total_size: u32 = 0; var lines = std.mem.tokenizeAny(u8, bytes, "\r\n"); @@ -56,7 +47,7 @@ pub const Request = struct { while (lines.next()) |line| { total_size += @intCast(line.len); - if (total_size > self.size_request_max) { + if (total_size > options.size_request_max) { return HTTPError.ContentTooLarge; } @@ -68,16 +59,14 @@ pub const Request = struct { log.warn("invalid method: {s}", .{method_string}); return HTTPError.InvalidMethod; }; - self.set_method(method); const uri_string = chunks.next() orelse return HTTPError.MalformedRequest; - if (uri_string.len >= self.size_request_uri_max) return HTTPError.URITooLong; + if (uri_string.len >= options.size_request_uri_max) return HTTPError.URITooLong; if (uri_string[0] != '/') return HTTPError.MalformedRequest; - self.set_uri(uri_string); const version_string = chunks.next() orelse return HTTPError.MalformedRequest; if (!std.mem.eql(u8, version_string, "HTTP/1.1")) return HTTPError.HTTPVersionNotSupported; - self.set_version(.@"HTTP/1.1"); + self.set(.{ .method = method, .uri = uri_string }); // There shouldn't be anything else. 
if (chunks.next() != null) return HTTPError.MalformedRequest; @@ -93,20 +82,24 @@ pub const Request = struct { } } - pub fn set_method(self: *Request, method: Method) void { - self.method = method; - } + pub const RequestSetOptions = struct { + method: ?Method = null, + uri: ?[]const u8 = null, + body: ?[]const u8 = null, + }; - pub fn set_uri(self: *Request, uri: []const u8) void { - self.uri = uri; - } + pub fn set(self: *Request, options: RequestSetOptions) void { + if (options.method) |method| { + self.method = method; + } - pub fn set_version(self: *Request, version: std.http.Version) void { - self.version = version; - } + if (options.uri) |uri| { + self.uri = uri; + } - pub fn set_body(self: *Request, body: []const u8) void { - self.body = body; + if (options.body) |body| { + self.body = body; + } } /// Should this specific Request expect to capture a body. @@ -128,14 +121,13 @@ test "Parse Request" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + try request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 256, }); - defer request.deinit(); - - try request.parse_headers(request_text[0..]); try testing.expectEqual(.GET, request.method); try testing.expectEqualStrings("/", request.uri); @@ -155,14 +147,13 @@ test "Expect ContentTooLong Error" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 4096}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 128, .size_request_uri_max = 64, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.ContentTooLarge, err); } @@ -175,14 +166,13 @@ test "Expect URITooLong Error" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 4096}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024 * 1024, .size_request_uri_max = 2048, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.URITooLong, err); } @@ -195,14 +185,13 @@ test "Expect Malformed when URI missing /" { ; const request_text = std.fmt.comptimePrint(request_text_format, .{[_]u8{'a'} ** 256}); - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.MalformedRequest, err); } @@ -214,14 +203,13 @@ test "Expect Incorrect HTTP Version" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = 
request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.HTTPVersionNotSupported, err); } @@ -233,13 +221,12 @@ test "Malformed Headers" { \\Accept: text/html ; - var request = try Request.init(testing.allocator, .{ - .num_header_max = 32, + var request = try Request.init(testing.allocator, 32); + defer request.deinit(); + + const err = request.parse_headers(request_text[0..], .{ .size_request_max = 1024, .size_request_uri_max = 512, }); - defer request.deinit(); - - const err = request.parse_headers(request_text[0..]); try testing.expectError(HTTPError.MalformedRequest, err); } diff --git a/src/http/response.zig b/src/http/response.zig index 19f4349..3335d9d 100644 --- a/src/http/response.zig +++ b/src/http/response.zig @@ -6,51 +6,22 @@ const Status = @import("lib.zig").Status; const Mime = @import("lib.zig").Mime; const Date = @import("lib.zig").Date; -const ResponseOptions = struct { - num_headers_max: u32, -}; - -const CachedDate = struct { - buffer: []u8, - ts: i64, - index: usize, -}; - pub const Response = struct { allocator: std.mem.Allocator, status: ?Status = null, mime: ?Mime = null, body: ?[]const u8 = null, headers: Headers, - cached_date: CachedDate, - pub fn init(allocator: std.mem.Allocator, options: ResponseOptions) !Response { + pub fn init(allocator: std.mem.Allocator, num_header_max: u32) !Response { return Response{ .allocator = allocator, - .headers = try Headers.init(allocator, options.num_headers_max), - .cached_date = CachedDate{ - .buffer = try allocator.alloc(u8, 32), - .index = 0, - .ts = 0, - }, + .headers = try Headers.init(allocator, num_header_max), }; } pub fn deinit(self: *Response) void { self.headers.deinit(); - self.allocator.free(self.cached_date.buffer); - } - - pub fn set_status(self: *Response, status: Status) void { - self.status = status; - } - - pub fn set_mime(self: *Response, mime: Mime) void { - self.mime = mime; - } - - pub fn set_body(self: *Response, body: []const u8) void { - self.body = body; } pub fn clear(self: *Response) void { @@ -97,32 +68,8 @@ pub const Response = struct { return error.MissingStatus; } - std.mem.copyForwards(u8, buffer[index..], "\r\n"); - index += 2; - - // Standard Headers - // Cache the Date - const ts = std.time.timestamp(); - if (ts != 0) { - if (self.cached_date.ts != ts) { - const date = Date.init(ts).to_http_date(); - const buf = try date.into_buf(self.cached_date.buffer); - self.cached_date = .{ - .ts = ts, - .buffer = self.cached_date.buffer, - .index = buf.len, - }; - } - std.mem.copyForwards(u8, buffer[index..], "Date: "); - index += 6; - std.mem.copyForwards(u8, buffer[index..], self.cached_date.buffer[0..self.cached_date.index]); - index += self.cached_date.index; - std.mem.copyForwards(u8, buffer[index..], "\r\n"); - index += 2; - } - - std.mem.copyForwards(u8, buffer[index..], "Server: zzz\r\nConnection: keep-alive\r\n"); - index += 37; + std.mem.copyForwards(u8, buffer[index..], "\r\nServer: zzz\r\nConnection: keep-alive\r\n"); + index += 39; // Headers var iter = self.headers.map.iterator(); diff --git a/src/http/server.zig b/src/http/server.zig index bec92a7..97bffcf 100644 --- a/src/http/server.zig +++ b/src/http/server.zig @@ -102,13 +102,18 @@ pub const ServerConfig = struct { /// after an arena is cleared. /// /// A higher value will increase memory usage but - /// should make allocators faster.Tardy + /// should make allocators faster. /// /// A lower value will reduce memory usage but /// will make allocators slower. 
/// /// Default: 1KB size_connection_arena_retain: u32 = 1024, + /// Amount of space on the `recv_buffer` retained + /// after every send. + /// + /// Default: 1KB + size_recv_buffer_retain: u32 = 1024, /// Size of the buffer (in bytes) used for /// interacting with the socket. /// @@ -158,90 +163,6 @@ pub fn Server(comptime security: Security) type { tls_ctx: TLSContextType, router: *const Router, - fn route_and_respond(runtime: *Runtime, p: *Provision, router: *const Router) !RecvStatus { - route: { - const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); - if (found) |f| { - const handler = f.route.get_handler(p.request.method); - - if (handler) |h_with_data| { - const context: *Context = try p.arena.allocator().create(Context); - context.* = .{ - .allocator = p.arena.allocator(), - .runtime = runtime, - .request = &p.request, - .response = &p.response, - .path = p.request.uri, - .captures = f.captures, - .queries = f.queries, - .provision = p, - }; - - @call(.auto, h_with_data.handler, .{ - context, - @as(*anyopaque, @ptrFromInt(h_with_data.data)), - }) catch |e| { - log.err("\"{s}\" handler failed with error: {}", .{ p.request.uri, e }); - p.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - - return try raw_respond(p); - }; - - return .spawned; - } else { - // If we match the route but not the method. - p.response.set(.{ - .status = .@"Method Not Allowed", - .mime = Mime.HTML, - .body = "405 Method Not Allowed", - }); - - // We also need to add to Allow header. - // This uses the connection's arena to allocate 64 bytes. - const allowed = f.route.get_allowed(p.arena.allocator()) catch { - p.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - - break :route; - }; - - p.response.headers.add("Allow", allowed) catch { - p.response.set(.{ - .status = .@"Internal Server Error", - .mime = Mime.HTML, - .body = "", - }); - - break :route; - }; - - break :route; - } - } - - // Didn't match any route. 
- p.response.set(.{ - .status = .@"Not Found", - .mime = Mime.HTML, - .body = "404 Not Found", - }); - break :route; - } - - if (p.response.status == .Kill) { - return .kill; - } - - return try raw_respond(p); - } - pub fn init(config: ServerConfig) Self { const tls_ctx = switch (comptime security) { .tls => |inner| TLSContext.init(.{ @@ -264,50 +185,12 @@ pub fn Server(comptime security: Security) type { }; } - pub fn deinit(self: *Self) void { + pub fn deinit(self: *const Self) void { if (comptime security == .tls) { self.tls_ctx.deinit(); } } - fn create_socket(self: *const Self) !std.posix.socket_t { - const socket: std.posix.socket_t = blk: { - const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; - break :blk try std.posix.socket( - self.addr.any.family, - socket_flags, - std.posix.IPPROTO.TCP, - ); - }; - - log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); - - if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT_LB, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEPORT, - &std.mem.toBytes(@as(c_int, 1)), - ); - } else { - try std.posix.setsockopt( - socket, - std.posix.SOL.SOCKET, - std.posix.SO.REUSEADDR, - &std.mem.toBytes(@as(c_int, 1)), - ); - } - - return socket; - } - pub fn bind(self: *Self, host: []const u8, port: u16) !void { assert(host.len > 0); assert(port > 0); @@ -342,9 +225,8 @@ pub fn Server(comptime security: Security) type { _ = provision.arena.reset(.{ .retain_with_limit = config.size_connection_arena_retain }); provision.response.clear(); - // TODO: new config setting here! - if (provision.recv_buffer.items.len > 1024) { - provision.recv_buffer.shrinkRetainingCapacity(1024); + if (provision.recv_buffer.items.len > config.size_recv_buffer_retain) { + provision.recv_buffer.shrinkRetainingCapacity(config.size_recv_buffer_retain); } else { provision.recv_buffer.clearRetainingCapacity(); } @@ -468,204 +350,7 @@ pub fn Server(comptime security: Security) type { } }; - var status: RecvStatus = status: { - var stage = provision.stage; - const job = provision.job.recv; - - if (job.count >= config.size_request_max) { - provision.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); - - break :status raw_respond(provision) catch unreachable; - } - - switch (stage) { - .header => { - const start = provision.recv_buffer.items.len -| 4; - provision.recv_buffer.appendSlice(recv_buffer) catch unreachable; - const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); - - // Basically, this means we haven't finished processing the header. - if (header_ends == null) { - log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); - break :status .recv; - } - - log.debug("{d} - parsing header", .{provision.index}); - // The +4 is to account for the slice we match. - const header_end: u32 = @intCast(header_ends.? 
+ 4); - provision.request.parse_headers(provision.recv_buffer.items[0..header_end]) catch |e| { - switch (e) { - HTTPError.ContentTooLarge => { - provision.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "Request was too large", - }); - }, - HTTPError.TooManyHeaders => { - provision.response.set(.{ - .status = .@"Request Header Fields Too Large", - .mime = Mime.HTML, - .body = "Too Many Headers", - }); - }, - HTTPError.MalformedRequest => { - provision.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Malformed Request", - }); - }, - HTTPError.URITooLong => { - provision.response.set(.{ - .status = .@"URI Too Long", - .mime = Mime.HTML, - .body = "URI Too Long", - }); - }, - HTTPError.InvalidMethod => { - provision.response.set(.{ - .status = .@"Not Implemented", - .mime = Mime.HTML, - .body = "Not Implemented", - }); - }, - HTTPError.HTTPVersionNotSupported => { - provision.response.set(.{ - .status = .@"HTTP Version Not Supported", - .mime = Mime.HTML, - .body = "HTTP Version Not Supported", - }); - }, - } - - break :status raw_respond(provision) catch unreachable; - }; - - // Logging information about Request. - log.info("{d} - \"{s} {s}\" {s}", .{ - provision.index, - @tagName(provision.request.method), - provision.request.uri, - provision.request.headers.get("User-Agent") orelse "N/A", - }); - - // HTTP/1.1 REQUIRES a Host header to be present. - const is_http_1_1 = provision.request.version == .@"HTTP/1.1"; - const is_host_present = provision.request.headers.get("Host") != null; - if (is_http_1_1 and !is_host_present) { - provision.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "Missing \"Host\" Header", - }); - - break :status raw_respond(provision) catch unreachable; - } - - if (!provision.request.expect_body()) { - break :status route_and_respond(rt, provision, router) catch unreachable; - } - - // Everything after here is a Request that is expecting a body. - const content_length = blk: { - const length_string = provision.request.headers.get("Content-Length") orelse { - break :blk 0; - }; - - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); - - break :status raw_respond(provision) catch unreachable; - }; - }; - - if (header_end < provision.recv_buffer.items.len) { - const difference = provision.recv_buffer.items.len - header_end; - if (difference == content_length) { - // Whole Body - log.debug("{d} - got whole body with header", .{provision.index}); - const body_end = header_end + difference; - provision.request.set_body(provision.recv_buffer.items[header_end..body_end]); - break :status route_and_respond(rt, provision, router) catch unreachable; - } else { - // Partial Body - log.debug("{d} - got partial body with header", .{provision.index}); - stage = .{ .body = header_end }; - break :status .recv; - } - } else if (header_end == provision.recv_buffer.items.len) { - // Body of length 0 probably or only got header. - if (content_length == 0) { - log.debug("{d} - got body of length 0", .{provision.index}); - // Body of Length 0. - provision.request.set_body(""); - break :status route_and_respond(rt, provision, router) catch unreachable; - } else { - // Got only header. 
- log.debug("{d} - got all header aka no body", .{provision.index}); - stage = .{ .body = header_end }; - break :status .recv; - } - } else unreachable; - }, - - .body => |header_end| { - // We should ONLY be here if we expect there to be a body. - assert(provision.request.expect_body()); - log.debug("{d} - body matching trigger_tasked", .{provision.index}); - - const content_length = blk: { - const length_string = provision.request.headers.get("Content-Length") orelse { - provision.response.set(.{ - .status = .@"Length Required", - .mime = Mime.HTML, - .body = "", - }); - - break :status raw_respond(provision) catch unreachable; - }; - - break :blk std.fmt.parseInt(u32, length_string, 10) catch { - provision.response.set(.{ - .status = .@"Bad Request", - .mime = Mime.HTML, - .body = "", - }); - - break :status raw_respond(provision) catch unreachable; - }; - }; - - const request_length = header_end + content_length; - - // If this body will be too long, abort early. - if (request_length > config.size_request_max) { - provision.response.set(.{ - .status = .@"Content Too Large", - .mime = Mime.HTML, - .body = "", - }); - break :status raw_respond(provision) catch unreachable; - } - - if (job.count >= request_length) { - provision.request.set_body(provision.recv_buffer.items[header_end..request_length]); - break :status route_and_respond(rt, provision, router) catch unreachable; - } else { - break :status .recv; - } - }, - } - }; + const status = try on_recv(recv_buffer, rt, provision, router, config); switch (status) { .spawned => return, @@ -681,61 +366,14 @@ pub fn Server(comptime security: Security) type { provision.buffer, ); }, - .send => |*pslice| { - const plain_buffer = pslice.get(0, config.size_socket_buffer); - - switch (comptime security) { - .tls => |_| { - const tls_slice = rt.storage.get("__zzz_tls_slice", []TLSType); - const tls_ptr: *TLSType = &tls_slice[provision.index]; - assert(tls_ptr.* != null); - - const encrypted_buffer = tls_ptr.*.?.encrypt(plain_buffer) catch |e| { - log.err("{d} - encrypt failed: {any}", .{ provision.index, e }); - provision.job = .close; - try rt.net.close(provision, close_task, provision.socket); - return error.TLSEncryptFailed; - }; - - provision.job = .{ - .send = .{ - .after = .recv, - .slice = pslice.*, - .count = @intCast(plain_buffer.len), - .security = .{ - .tls = .{ - .encrypted = encrypted_buffer, - .encrypted_count = 0, - }, - }, - }, - }; - - try rt.net.send( - provision, - send_then_recv_task, - provision.socket, - encrypted_buffer, - ); - }, - .plain => { - provision.job = .{ - .send = .{ - .after = .recv, - .slice = pslice.*, - .count = 0, - .security = .plain, - }, - }; - - try rt.net.send( - provision, - send_then_recv_task, - provision.socket, - plain_buffer, - ); - }, - } + .send => |pslice| { + const first_buffer = try prepare_send(rt, provision, .recv, pslice); + try rt.net.send( + provision, + send_then_recv_task, + provision.socket, + first_buffer, + ); }, } } @@ -1065,5 +703,323 @@ pub fn Server(comptime security: Security) type { rt.allocator.free(tls_slice); } } + + fn create_socket(self: *const Self) !std.posix.socket_t { + const socket: std.posix.socket_t = blk: { + const socket_flags = std.posix.SOCK.STREAM | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK; + break :blk try std.posix.socket( + self.addr.any.family, + socket_flags, + std.posix.IPPROTO.TCP, + ); + }; + + log.debug("socket | t: {s} v: {any}", .{ @typeName(std.posix.socket_t), socket }); + + if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) { + try 
std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT_LB, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else if (@hasDecl(std.posix.SO, "REUSEPORT")) { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEPORT, + &std.mem.toBytes(@as(c_int, 1)), + ); + } else { + try std.posix.setsockopt( + socket, + std.posix.SOL.SOCKET, + std.posix.SO.REUSEADDR, + &std.mem.toBytes(@as(c_int, 1)), + ); + } + + return socket; + } + + fn route_and_respond(runtime: *Runtime, p: *Provision, router: *const Router) !RecvStatus { + route: { + const found = router.get_route_from_host(p.request.uri, p.captures, &p.queries); + if (found) |f| { + const handler = f.route.get_handler(p.request.method); + + if (handler) |h_with_data| { + const context: *Context = try p.arena.allocator().create(Context); + context.* = .{ + .allocator = p.arena.allocator(), + .runtime = runtime, + .request = &p.request, + .response = &p.response, + .path = p.request.uri, + .captures = f.captures, + .queries = f.queries, + .provision = p, + }; + + @call(.auto, h_with_data.handler, .{ + context, + @as(*anyopaque, @ptrFromInt(h_with_data.data)), + }) catch |e| { + log.err("\"{s}\" handler failed with error: {}", .{ p.request.uri, e }); + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + return try raw_respond(p); + }; + + return .spawned; + } else { + // If we match the route but not the method. + p.response.set(.{ + .status = .@"Method Not Allowed", + .mime = Mime.HTML, + .body = "405 Method Not Allowed", + }); + + // We also need to add to Allow header. + // This uses the connection's arena to allocate 64 bytes. + const allowed = f.route.get_allowed(p.arena.allocator()) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + p.response.headers.add("Allow", allowed) catch { + p.response.set(.{ + .status = .@"Internal Server Error", + .mime = Mime.HTML, + .body = "", + }); + + break :route; + }; + + break :route; + } + } + + // Didn't match any route. + p.response.set(.{ + .status = .@"Not Found", + .mime = Mime.HTML, + .body = "404 Not Found", + }); + break :route; + } + + if (p.response.status == .Kill) { + return .kill; + } + + return try raw_respond(p); + } + + inline fn on_recv( + buffer: []const u8, + rt: *Runtime, + provision: *Provision, + router: *const Router, + config: *const ServerConfig, + ) !RecvStatus { + var stage = provision.stage; + const job = provision.job.recv; + + if (job.count >= config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + + return try raw_respond(provision); + } + + switch (stage) { + .header => { + const start = provision.recv_buffer.items.len -| 4; + try provision.recv_buffer.appendSlice(buffer); + const header_ends = std.mem.lastIndexOf(u8, provision.recv_buffer.items[start..], "\r\n\r\n"); + + // Basically, this means we haven't finished processing the header. + if (header_ends == null) { + log.debug("{d} - header doesn't end in this chunk, continue", .{provision.index}); + return .recv; + } + + log.debug("{d} - parsing header", .{provision.index}); + // The +4 is to account for the slice we match. + const header_end: u32 = @intCast(header_ends.? 
+ 4); + provision.request.parse_headers(provision.recv_buffer.items[0..header_end], .{ + .size_request_max = config.size_request_max, + .size_request_uri_max = config.size_request_uri_max, + }) catch |e| { + switch (e) { + HTTPError.ContentTooLarge => { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "Request was too large", + }); + }, + HTTPError.TooManyHeaders => { + provision.response.set(.{ + .status = .@"Request Header Fields Too Large", + .mime = Mime.HTML, + .body = "Too Many Headers", + }); + }, + HTTPError.MalformedRequest => { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Malformed Request", + }); + }, + HTTPError.URITooLong => { + provision.response.set(.{ + .status = .@"URI Too Long", + .mime = Mime.HTML, + .body = "URI Too Long", + }); + }, + HTTPError.InvalidMethod => { + provision.response.set(.{ + .status = .@"Not Implemented", + .mime = Mime.HTML, + .body = "Not Implemented", + }); + }, + HTTPError.HTTPVersionNotSupported => { + provision.response.set(.{ + .status = .@"HTTP Version Not Supported", + .mime = Mime.HTML, + .body = "HTTP Version Not Supported", + }); + }, + } + + return raw_respond(provision) catch unreachable; + }; + + // Logging information about Request. + log.info("{d} - \"{s} {s}\" {s}", .{ + provision.index, + @tagName(provision.request.method), + provision.request.uri, + provision.request.headers.get("User-Agent") orelse "N/A", + }); + + // HTTP/1.1 REQUIRES a Host header to be present. + const is_http_1_1 = provision.request.version == .@"HTTP/1.1"; + const is_host_present = provision.request.headers.get("Host") != null; + if (is_http_1_1 and !is_host_present) { + provision.response.set(.{ + .status = .@"Bad Request", + .mime = Mime.HTML, + .body = "Missing \"Host\" Header", + }); + + return try raw_respond(provision); + } + + if (!provision.request.expect_body()) { + return try route_and_respond(rt, provision, router); + } + + // Everything after here is a Request that is expecting a body. + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + break :blk 0; + }; + + break :blk try std.fmt.parseInt(u32, length_string, 10); + }; + + if (header_end < provision.recv_buffer.items.len) { + const difference = provision.recv_buffer.items.len - header_end; + if (difference == content_length) { + // Whole Body + log.debug("{d} - got whole body with header", .{provision.index}); + const body_end = header_end + difference; + provision.request.set(.{ + .body = provision.recv_buffer.items[header_end..body_end], + }); + return try route_and_respond(rt, provision, router); + } else { + // Partial Body + log.debug("{d} - got partial body with header", .{provision.index}); + stage = .{ .body = header_end }; + return .recv; + } + } else if (header_end == provision.recv_buffer.items.len) { + // Body of length 0 probably or only got header. + if (content_length == 0) { + log.debug("{d} - got body of length 0", .{provision.index}); + // Body of Length 0. + provision.request.set(.{ .body = "" }); + return try route_and_respond(rt, provision, router); + } else { + // Got only header. + log.debug("{d} - got all header aka no body", .{provision.index}); + stage = .{ .body = header_end }; + return .recv; + } + } else unreachable; + }, + + .body => |header_end| { + // We should ONLY be here if we expect there to be a body. 
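                    // Worked example for the arithmetic below: with header_end = 220 and a
                    // "Content-Length: 512" header, request_length = 220 + 512 = 732, so the
                    // request is routed once at least 732 bytes have arrived; anything past
                    // config.size_request_max is rejected with 413 Content Too Large.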
+ assert(provision.request.expect_body()); + log.debug("{d} - body matching", .{provision.index}); + + const content_length = blk: { + const length_string = provision.request.headers.get("Content-Length") orelse { + provision.response.set(.{ + .status = .@"Length Required", + .mime = Mime.HTML, + .body = "", + }); + + return try raw_respond(provision); + }; + + break :blk try std.fmt.parseInt(u32, length_string, 10); + }; + + const request_length = header_end + content_length; + + // If this body will be too long, abort early. + if (request_length > config.size_request_max) { + provision.response.set(.{ + .status = .@"Content Too Large", + .mime = Mime.HTML, + .body = "", + }); + return try raw_respond(provision); + } + + if (job.count >= request_length) { + provision.request.set(.{ + .body = provision.recv_buffer.items[header_end..request_length], + }); + return try route_and_respond(rt, provision, router); + } else { + return .recv; + } + }, + } + } }; } diff --git a/src/http/sse.zig b/src/http/sse.zig index 24abf9d..2b5ee66 100644 --- a/src/http/sse.zig +++ b/src/http/sse.zig @@ -33,38 +33,22 @@ pub fn SSE(comptime Server: type) type { const buffer = self.context.provision.buffer; if (options.id) |id| { - const buf = try std.fmt.bufPrint( - buffer[index..], - "id: {s}\n", - .{id}, - ); + const buf = try std.fmt.bufPrint(buffer[index..], "id: {s}\n", .{id}); index += buf.len; } if (options.event) |event| { - const buf = try std.fmt.bufPrint( - buffer[index..], - "event: {s}\n", - .{event}, - ); + const buf = try std.fmt.bufPrint(buffer[index..], "event: {s}\n", .{event}); index += buf.len; } if (options.data) |data| { - const buf = try std.fmt.bufPrint( - buffer[index..], - "data: {s}\n", - .{data}, - ); + const buf = try std.fmt.bufPrint(buffer[index..], "data: {s}\n", .{data}); index += buf.len; } if (options.retry) |retry| { - const buf = try std.fmt.bufPrint( - buffer[index..], - "retry: {d}\n", - .{retry}, - ); + const buf = try std.fmt.bufPrint(buffer[index..], "retry: {d}\n", .{retry}); index += buf.len; } From cbc8bf7767eef1b437c95df386bb39c8cc72e5f7 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Fri, 8 Nov 2024 16:37:43 -0800 Subject: [PATCH 15/17] chore(docs): add README and LICENSE to package --- build.zig.zon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build.zig.zon b/build.zig.zon index a9d9b60..7ecb164 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -14,6 +14,8 @@ }, .paths = .{ + "README.md", + "LICENSE", "build.zig", "build.zig.zon", "src", From 2e6327bdd406d829cc4b05078568bb1631d3d200 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 10 Nov 2024 01:35:11 -0800 Subject: [PATCH 16/17] chore(docs): prepare for v0.2.0 --- README.md | 26 ++++---- build.zig | 46 ++++++------- build.zig.zon | 6 +- docs/getting_started.md | 65 +++++++++++++----- docs/https.md | 76 ++++++++++++---------- docs/performance.md | 12 ++-- examples/{http => }/basic/main.zig | 0 examples/{http => }/benchmark/index.html | 0 examples/{http => }/benchmark/main.zig | 0 examples/{http => }/fs/main.zig | 0 examples/{http => }/fs/static/index.html | 0 examples/{http => }/minram/main.zig | 0 examples/{http => }/multithread/index.html | 0 examples/{http => }/multithread/main.zig | 0 examples/{http => }/sse/index.html | 0 examples/{http => }/sse/main.zig | 0 examples/{http => }/tls/certs/cert.pem | 0 examples/{http => }/tls/certs/key.pem | 0 examples/{http => }/tls/embed/pico.min.css | 0 examples/{http => }/tls/main.zig | 0 examples/{http => }/valgrind/main.zig | 0 flake.lock | 12 ++-- flake.nix 
| 3 +- src/http/headers.zig | 2 +- 24 files changed, 137 insertions(+), 111 deletions(-) rename examples/{http => }/basic/main.zig (100%) rename examples/{http => }/benchmark/index.html (100%) rename examples/{http => }/benchmark/main.zig (100%) rename examples/{http => }/fs/main.zig (100%) rename examples/{http => }/fs/static/index.html (100%) rename examples/{http => }/minram/main.zig (100%) rename examples/{http => }/multithread/index.html (100%) rename examples/{http => }/multithread/main.zig (100%) rename examples/{http => }/sse/index.html (100%) rename examples/{http => }/sse/main.zig (100%) rename examples/{http => }/tls/certs/cert.pem (100%) rename examples/{http => }/tls/certs/key.pem (100%) rename examples/{http => }/tls/embed/pico.min.css (100%) rename examples/{http => }/tls/main.zig (100%) rename examples/{http => }/valgrind/main.zig (100%) diff --git a/README.md b/README.md index d4e11be..5838a2a 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,9 @@ ## Installing Latest Zig Stable: `0.13.0` -Latest zzz release: `0.1.0` +Latest zzz release: `0.2.0` ``` -zig fetch --save git+https://github.com/mookums/zzz#v0.1.0 +zig fetch --save git+https://github.com/mookums/zzz#v0.2.0 ``` You can then add the dependency in your `build.zig` file: @@ -21,14 +21,14 @@ exe.root_module.addImport(zzz); ``` ## zzz? -zzz is a framework for writing performant and reliable networked services in Zig. It currently only supports TCP as the underlying transport layer and allows for any arbitrary protocol to run on top. It also natively supports TLS for securing connections. +zzz is a framework for writing performant and reliable networked services in Zig. It supports both HTTP and HTTPS (using BearSSL for TLS). -zzz currently supports Linux, Mac and Windows. Linux is currently the only target supported for deployments. +zzz currently supports Linux, Mac and Windows. Linux is currently the reccomended target for deployments. > [!IMPORTANT] > zzz is currently **alpha** software and there is still a lot changing at a fairly quick pace and certain places where things are less polished. -It focuses on modularity and portability, allowing you to swap in your own implementations for various things. Consumers can provide both a protocol and an async implementation, allowing for maximum flexibility. This allows for use in standard servers as well as embedded/bare metal domains. +It focuses on modularity and portability, allowing you to swap in your own implementations for various things. Consumers can provide an async implementation, allowing for maximum flexibility. This allows for use in standard servers as well as embedded/bare metal domains. For more information, look here: 1. [Getting Started](./docs/getting_started.md) @@ -51,20 +51,18 @@ With the recent migration to [tardy](https://github.com/mookums/tardy), zzz is a On the CCX63 instance on Hetzner with 2000 max connections, we are 70.9% faster than [zap](https://github.com/zigzap/zap) and 83.8% faster than [http.zig](https://github.com/karlseguin/http.zig). We also utilize less memory, using only ~3% of the memory used by zap and ~1.6% of the memory used by http.zig. -zzz can be configured to utilize minimal memory while remaining performant. The provided `minram` example only uses 256 kB (using `io_uring` and musl)! +zzz can be configured to utilize minimal memory while remaining performant. The provided `minram` example only uses 256 kB! ## Features - Built on top of [Tardy](https://github.com/mookums/tardy), an asynchronous runtime. 
- [Modular Asynchronous Implementation](https://muki.gg/post/modular-async) - `io_uring` for Linux (>= 5.1.0). - `epoll` for Linux (>= 2.5.45). + - `kqueue` for BSD & Mac. - `busy_loop` for Linux, Mac and Windows. -- Modular Protocol Implementation - - Allows for defining your own Protocol on top of TCP. - - Comes with: - - [HTTP/1.1](https://github.com/mookums/zzz/blob/main/src/http) - - HTTP/2 (planned) - - MQTT (planned) -- Single and Multi-threaded Support +- Single and Multithreaded Support - TLS using BearSSL -- (Almost) all memory allocated at startup +- Memory Pooling for minimal allocations + +## Contribution +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in zzz by you, shall be licensed as MPL2.0, without any additional terms or conditions. diff --git a/build.zig b/build.zig index 80c0a2c..9e38017 100644 --- a/build.zig +++ b/build.zig @@ -28,15 +28,14 @@ pub fn build(b: *std.Build) void { zzz.linkLibrary(bearssl); - add_example(b, "basic", .http, false, target, optimize, zzz, tardy); - add_example(b, "sse", .http, false, target, optimize, zzz, tardy); - add_example(b, "custom", .http, false, target, optimize, zzz, tardy); - add_example(b, "tls", .http, true, target, optimize, zzz, tardy); - add_example(b, "minram", .http, false, target, optimize, zzz, tardy); - add_example(b, "fs", .http, false, target, optimize, zzz, tardy); - add_example(b, "multithread", .http, false, target, optimize, zzz, tardy); - add_example(b, "benchmark", .http, false, target, optimize, zzz, tardy); - add_example(b, "valgrind", .http, true, target, optimize, zzz, tardy); + add_example(b, "basic", false, target, optimize, zzz, tardy); + add_example(b, "sse", false, target, optimize, zzz, tardy); + add_example(b, "tls", true, target, optimize, zzz, tardy); + add_example(b, "minram", false, target, optimize, zzz, tardy); + add_example(b, "fs", false, target, optimize, zzz, tardy); + add_example(b, "multithread", false, target, optimize, zzz, tardy); + add_example(b, "benchmark", false, target, optimize, zzz, tardy); + add_example(b, "valgrind", true, target, optimize, zzz, tardy); const tests = b.addTest(.{ .name = "tests", @@ -51,14 +50,9 @@ pub fn build(b: *std.Build) void { test_step.dependOn(&run_test.step); } -const Protocol = enum { - http, -}; - fn add_example( b: *std.Build, name: []const u8, - protocol: Protocol, link_libc: bool, target: std.Build.ResolvedTarget, optimize: std.builtin.Mode, @@ -66,8 +60,8 @@ fn add_example( tardy_module: *std.Build.Module, ) void { const example = b.addExecutable(.{ - .name = b.fmt("{s}_{s}", .{ @tagName(protocol), name }), - .root_source_file = b.path(b.fmt("./examples/{s}/{s}/main.zig", .{ @tagName(protocol), name })), + .name = name, + .root_source_file = b.path(b.fmt("./examples/{s}/main.zig", .{name})), .target = target, .optimize = optimize, .strip = false, @@ -79,17 +73,17 @@ fn add_example( example.root_module.addImport("zzz", zzz_module); example.root_module.addImport("tardy", tardy_module); + const install_artifact = b.addInstallArtifact(example, .{}); + b.getInstallStep().dependOn(&install_artifact.step); - const run_cmd = b.addRunArtifact(example); - run_cmd.step.dependOn(&install_artifact.step); - if (b.args) |args| { - run_cmd.addArgs(args); - } + const build_step = b.step(b.fmt("{s}", .{name}), b.fmt("Build zzz example ({s})", .{name})); + build_step.dependOn(&install_artifact.step); + + const run_artifact = b.addRunArtifact(example); + run_artifact.step.dependOn(&install_artifact.step); - const 
run_step = b.step( - b.fmt("run_{s}_{s}", .{ @tagName(protocol), name }), - b.fmt("Run {s} {s}", .{ @tagName(protocol), name }), - ); - run_step.dependOn(&run_cmd.step); + const run_step = b.step(b.fmt("run_{s}", .{name}), b.fmt("Run zzz example ({s})", .{name})); + run_step.dependOn(&install_artifact.step); + run_step.dependOn(&run_artifact.step); } diff --git a/build.zig.zon b/build.zig.zon index 7ecb164..e2315da 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,11 +1,11 @@ .{ .name = "zzz", - .version = "0.0.0", + .version = "0.2.0", .minimum_zig_version = "0.13.0", .dependencies = .{ .tardy = .{ - .url = "git+https://github.com/mookums/tardy?ref=main#feae50e9bf60ac13f1d5d14a7d3346fcfe442fa8", - .hash = "122093168263d66adc14bbee5de6aa0d4a2600e7299cad2b66175feeb6ce8aaef173", + .url = "git+https://github.com/mookums/tardy?ref=v0.1.0#ae0970d6b3fa5b03625b14e142c664efe1fd7789", + .hash = "12207f5afee3b8933c1c32737e8feedc80a2e4feebe058739509094c812e4a8d2cc8", }, .bearssl = .{ .url = "https://github.com/mookums/bearssl-zig/archive/37a96eee56fe2543579bbc6da148ca886f3dd32b.tar.gz", diff --git a/docs/getting_started.md b/docs/getting_started.md index 5e46ee4..b08dca6 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -1,18 +1,30 @@ # Getting Started zzz is a networking framework that allows for modularity and flexibility in design. For most use cases, this flexibility is not a requirement and so various defaults are provided. -For this guide, we will assume that you are running on a modern Linux platform and looking to design a service that utilizes HTTP. +For this guide, we will assume that you are running on a modern Linux platform and looking to design a service that utilizes HTTP. We will need both `zzz` and `tardy` for this to work. +You will need to match the version of Tardy that zzz is currently using to the version of Tardy you currently use within your program. This will eventually be standardized. -`zig fetch --save git+https://github.com/mookums/zzz#main` +These are the current latest releases and are compatible. +`zig fetch --save git+https://github.com/mookums/zzz#v0.2.0` +`zig fetch --save git+https://github.com/mookums/tardy#v0.1.0` ## Hello, World! We can write a quick example that serves out "Hello, World" responses to any client that connects to the server. This example is the same as the one that is provided within the `examples/basic` directory. ```zig const std = @import("std"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/basic"); + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.plain); +const Router = Server.Router; +const Context = Server.Context; +const Route = Server.Route; pub fn main() !void { const host: []const u8 = "0.0.0.0"; @@ -22,21 +34,33 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + // Creating our Tardy instance that will spawn our runtimes. 
+ var t = try Tardy.init(.{ + .allocator = allocator, + .threading = .single, + }); + defer t.deinit(); + + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - const body = + const num: i8 = 12; + + try router.serve_route("/", Route.init().get(&num, struct { + pub fn handler_fn(ctx: *Context, id: *const i8) !void { + const body_fmt = \\ \\ \\ \\

<h1>Hello, World!</h1> + \\<p>id: {d}</p>

\\ \\ ; - response.set(.{ + const body = try std.fmt.allocPrint(ctx.allocator, body_fmt, .{id.*}); + + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -44,14 +68,23 @@ pub fn main() !void { } }.handler_fn)); - var server = http.Server(.plain, .auto).init(.{ - .allocator = allocator, - .threading = .single, - }); - defer server.deinit(); - - try server.bind(host, port); - try server.listen(.{ .router = &router }); + // This provides the entry function into every Tardy runtime. + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator = rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); } ``` diff --git a/docs/https.md b/docs/https.md index f2950b3..ece4156 100644 --- a/docs/https.md +++ b/docs/https.md @@ -7,23 +7,40 @@ zzz utilizes [BearSSL](https://bearssl.org/) to provide a safe and performant TL This is derived from the example at `examples/tls` and utilizes some certificates that are present within the repository. ```zig const std = @import("std"); + const zzz = @import("zzz"); const http = zzz.HTTP; -const log = std.log.scoped(.@"examples/tls"); + +const tardy = @import("tardy"); +const Tardy = tardy.Tardy(.auto); +const Runtime = tardy.Runtime; + +const Server = http.Server(.{ .tls = .{ + .cert = .{ .file = .{ .path = "./examples/http/tls/certs/cert.pem" } }, + .key = .{ .file = .{ .path = "./examples/http/tls/certs/key.pem" } }, + .cert_name = "CERTIFICATE", + .key_name = "EC PRIVATE KEY", +} }); + +const Context = Server.Context; +const Route = Server.Route; +const Router = Server.Router; pub fn main() !void { const host: []const u8 = "0.0.0.0"; const port: u16 = 9862; - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa = std.heap.GeneralPurposeAllocator( + .{ .thread_safe = true }, + ){ .backing_allocator = std.heap.c_allocator }; const allocator = gpa.allocator(); defer _ = gpa.deinit(); - var router = http.Router.init(allocator); + var router = Router.init(allocator); defer router.deinit(); - try router.serve_route("/", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { + try router.serve_route("/", Route.init().get({}, struct { + pub fn handler_fn(ctx: *Context, _: void) !void { const body = \\ \\ @@ -33,7 +50,7 @@ pub fn main() !void { \\ ; - response.set(.{ + try ctx.respond(.{ .status = .OK, .mime = http.Mime.HTML, .body = body[0..], @@ -41,39 +58,28 @@ pub fn main() !void { } }.handler_fn)); - try router.serve_route("/kill", http.Route.init().get(struct { - pub fn handler_fn(_: http.Request, response: *http.Response, _: http.Context) void { - response.set(.{ - .status = .Kill, - .mime = http.Mime.HTML, - .body = "", - }); - } - }.handler_fn)); - - var server = http.Server( - .{ - .tls = .{ - .cert = .{ - .file = .{ .path = "./examples/http/tls/certs/cert.pem" }, - }, - .key = .{ - .file = .{ .path = "./examples/http/tls/certs/key.pem" }, - }, - .cert_name = "CERTIFICATE", - .key_name = "EC PRIVATE KEY", - }, - }, - .auto, - ).init(.{ + var t = try Tardy.init(.{ .allocator = allocator, .threading = .single, }); - defer server.deinit(); + defer t.deinit(); - try server.bind(host, port); - try server.listen(.{ .router = &router }); -} + try t.entry( + &router, + struct { + fn entry(rt: *Runtime, r: *const Router) !void { + var server = Server.init(.{ .allocator 
= rt.allocator }); + try server.bind(host, port); + try server.serve(r, rt); + } + }.entry, + {}, + struct { + fn exit(rt: *Runtime, _: void) !void { + try Server.clean(rt); + } + }.exit, + ); } ``` This example above passes the `.tls` variant of the enum to the HTTP Server and provides the location of the certificate and key to be used. It also has the functionality to pass in a buffer containing the cert and key data if that is preferable. You must also provide the certificate and key name as the PEM format allows for multiple items to be placed within the same file. diff --git a/docs/performance.md b/docs/performance.md index 90b96a4..57b5a0a 100644 --- a/docs/performance.md +++ b/docs/performance.md @@ -4,16 +4,12 @@ zzz's design philosophy results in a lot of knobs that the consumer of the libra These performance tips are general and can apply to any protocol implementation. HTTP is used as the general example because it is currently the only completed protocol. ## Performance Hunting -zzz now officially runs multithreaded by default. By default, it will utilize `@min(cpu_count / 2 - 1, 1)` threads. This can be tuned by using the `.threading` flag. +zzz now officially runs multithreaded by default. By default, it will utilize `@min(cpu_count / 2 - 1, 1)` threads. This can be tuned by changing the `.threading` option of the Tardy runtime. ```zig -var server = http.Server(.plain, .auto).init(.{ +var t = try Tardy.init(.{ .allocator = allocator, - .threading = .{ .multi = COUNT }, - .size_backlog = 32, - .size_connections_max = 16, - .size_connection_arena_retain = 64, - .size_socket_buffer = 512, + .threading = .{ .multi = COUNT}, }); ``` @@ -30,7 +26,7 @@ Other settings of note include: When using zzz in certain environments, your goal may be to reduce memory usage. zzz provides a variety of controls for handling how much memory is allocated at start up. 
```zig -var server = http.Server(.plain, .auto).init(.{ +var server = Server.init(.{ .allocator = allocator, .size_backlog = 32, .size_connections_max = 16, diff --git a/examples/http/basic/main.zig b/examples/basic/main.zig similarity index 100% rename from examples/http/basic/main.zig rename to examples/basic/main.zig diff --git a/examples/http/benchmark/index.html b/examples/benchmark/index.html similarity index 100% rename from examples/http/benchmark/index.html rename to examples/benchmark/index.html diff --git a/examples/http/benchmark/main.zig b/examples/benchmark/main.zig similarity index 100% rename from examples/http/benchmark/main.zig rename to examples/benchmark/main.zig diff --git a/examples/http/fs/main.zig b/examples/fs/main.zig similarity index 100% rename from examples/http/fs/main.zig rename to examples/fs/main.zig diff --git a/examples/http/fs/static/index.html b/examples/fs/static/index.html similarity index 100% rename from examples/http/fs/static/index.html rename to examples/fs/static/index.html diff --git a/examples/http/minram/main.zig b/examples/minram/main.zig similarity index 100% rename from examples/http/minram/main.zig rename to examples/minram/main.zig diff --git a/examples/http/multithread/index.html b/examples/multithread/index.html similarity index 100% rename from examples/http/multithread/index.html rename to examples/multithread/index.html diff --git a/examples/http/multithread/main.zig b/examples/multithread/main.zig similarity index 100% rename from examples/http/multithread/main.zig rename to examples/multithread/main.zig diff --git a/examples/http/sse/index.html b/examples/sse/index.html similarity index 100% rename from examples/http/sse/index.html rename to examples/sse/index.html diff --git a/examples/http/sse/main.zig b/examples/sse/main.zig similarity index 100% rename from examples/http/sse/main.zig rename to examples/sse/main.zig diff --git a/examples/http/tls/certs/cert.pem b/examples/tls/certs/cert.pem similarity index 100% rename from examples/http/tls/certs/cert.pem rename to examples/tls/certs/cert.pem diff --git a/examples/http/tls/certs/key.pem b/examples/tls/certs/key.pem similarity index 100% rename from examples/http/tls/certs/key.pem rename to examples/tls/certs/key.pem diff --git a/examples/http/tls/embed/pico.min.css b/examples/tls/embed/pico.min.css similarity index 100% rename from examples/http/tls/embed/pico.min.css rename to examples/tls/embed/pico.min.css diff --git a/examples/http/tls/main.zig b/examples/tls/main.zig similarity index 100% rename from examples/http/tls/main.zig rename to examples/tls/main.zig diff --git a/examples/http/valgrind/main.zig b/examples/valgrind/main.zig similarity index 100% rename from examples/http/valgrind/main.zig rename to examples/valgrind/main.zig diff --git a/flake.lock b/flake.lock index 34380c1..2451aa0 100644 --- a/flake.lock +++ b/flake.lock @@ -139,11 +139,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1727469213, - "narHash": "sha256-d/65qZtXwVpIwxBTdNuf484Ie58wa5CYK5h+aXsLDoQ=", + "lastModified": 1731172921, + "narHash": "sha256-LTwbJCRZua1cZCiaisO1pnX446EKSZcYy9730jX1F4w=", "owner": "nixos", "repo": "nixpkgs", - "rev": "a9a9e085f155b55e4a6dc49a16572b2c799ba66f", + "rev": "0b88841b5eb61720be435a9e40f69bd853fa232c", "type": "github" }, "original": { @@ -260,11 +260,11 @@ "nixpkgs": "nixpkgs_2" }, "locked": { - "lastModified": 1727439069, - "narHash": "sha256-qonN+4DQtubTpV5VluIX57aq6J0UurtW2IGz8dtIzJE=", + "lastModified": 1731154207, + "narHash": 
"sha256-hWLW7Bt/6614gxX0tbnSW9aJMahOAIQW+LC5HZix+XA=", "owner": "mitchellh", "repo": "zig-overlay", - "rev": "93b14ef3d7f496ae49c79ba392eb3351cb92b57a", + "rev": "9f756b991d0fe79f322432dcd6e97e4ffccceb61", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 23795c2..69863fc 100644 --- a/flake.nix +++ b/flake.nix @@ -1,5 +1,5 @@ { - description = "a high performance communicaton abstraction library"; + description = "a framework for writing performant and reliable networked services"; inputs = { nixpkgs.url = "github:nixos/nixpkgs/release-24.05"; @@ -33,7 +33,6 @@ gdb valgrind # Benchmarking - linuxPackages.perf wrk ]; }; diff --git a/src/http/headers.zig b/src/http/headers.zig index 3ea3e5c..21723e6 100644 --- a/src/http/headers.zig +++ b/src/http/headers.zig @@ -29,7 +29,7 @@ pub const Headers = struct { self.map.putAssumeCapacity(key, value); } - pub fn get(self: *Headers, key: []const u8) ?[]const u8 { + pub fn get(self: *const Headers, key: []const u8) ?[]const u8 { return self.map.get(key); } From f24b005700e010610a043422f74df578d9274b65 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Sun, 10 Nov 2024 01:37:39 -0800 Subject: [PATCH 17/17] chore(docs): update getting started --- docs/getting_started.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/getting_started.md b/docs/getting_started.md index b08dca6..318cc1f 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -5,7 +5,9 @@ For this guide, we will assume that you are running on a modern Linux platform a You will need to match the version of Tardy that zzz is currently using to the version of Tardy you currently use within your program. This will eventually be standardized. These are the current latest releases and are compatible. + `zig fetch --save git+https://github.com/mookums/zzz#v0.2.0` + `zig fetch --save git+https://github.com/mookums/tardy#v0.1.0` ## Hello, World! @@ -69,6 +71,7 @@ pub fn main() !void { }.handler_fn)); // This provides the entry function into every Tardy runtime. + // This runs once within each runtime that spawns. try t.entry( &router, struct {