diff --git a/.github/workflows/wpt.yml b/.github/workflows/wpt.yml
index ddfadf81e..62d768478 100644
--- a/.github/workflows/wpt.yml
+++ b/.github/workflows/wpt.yml
@@ -47,7 +47,7 @@ jobs:
runs-on: ubuntu-latest
container:
- image: ghcr.io/lightpanda-io/zig-browsercore:0.12.0-dev.1773-8a8fd47d2
+ image: ghcr.io/lightpanda-io/zig-browsercore:0.12.1
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/zig-fmt.yml b/.github/workflows/zig-fmt.yml
index ab90714d7..b8f712cad 100644
--- a/.github/workflows/zig-fmt.yml
+++ b/.github/workflows/zig-fmt.yml
@@ -27,7 +27,7 @@ jobs:
runs-on: ubuntu-latest
container:
- image: ghcr.io/lightpanda-io/zig:0.12.0-dev.1773-8a8fd47d2
+ image: ghcr.io/lightpanda-io/zig:0.12.1
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/zig-test.yml b/.github/workflows/zig-test.yml
index 5608747e3..72a2d8ce3 100644
--- a/.github/workflows/zig-test.yml
+++ b/.github/workflows/zig-test.yml
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
container:
- image: ghcr.io/lightpanda-io/zig-browsercore:0.12.0-dev.1773-8a8fd47d2
+ image: ghcr.io/lightpanda-io/zig-browsercore:0.12.1
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
@@ -71,7 +71,7 @@ jobs:
runs-on: ubuntu-latest
container:
- image: ghcr.io/lightpanda-io/zig-browsercore:0.12.0-dev.1773-8a8fd47d2
+ image: ghcr.io/lightpanda-io/zig-browsercore:0.12.1
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
@@ -97,7 +97,7 @@ jobs:
runs-on: ubuntu-latest
container:
- image: ghcr.io/lightpanda-io/zig-browsercore:0.12.0-dev.1773-8a8fd47d2
+ image: ghcr.io/lightpanda-io/zig-browsercore:0.12.1
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
diff --git a/README.md b/README.md
index 6a6a84100..6997fa2ec 100644
--- a/README.md
+++ b/README.md
@@ -76,7 +76,7 @@ We do not provide yet binary versions of Lightpanda, you have to compile it from
### Prerequisites
-Lightpanda is written with [Zig](https://ziglang.org/) `0.12`. You have to
+Lightpanda is written with [Zig](https://ziglang.org/) `0.12.1`. You have to
install it with the right version in order to build the project.
Lightpanda also depends on
diff --git a/build.zig b/build.zig
index e96ae3027..c6b65029c 100644
--- a/build.zig
+++ b/build.zig
@@ -28,7 +28,7 @@ const jsruntime_pkgs = jsruntime.packages(jsruntime_path);
/// which zig version to install.
const recommended_zig_version = jsruntime.recommended_zig_version;
-pub fn build(b: *std.build.Builder) !void {
+pub fn build(b: *std.Build) !void {
switch (comptime builtin.zig_version.order(std.SemanticVersion.parse(recommended_zig_version) catch unreachable)) {
.eq => {},
.lt => {
@@ -53,11 +53,11 @@ pub fn build(b: *std.build.Builder) !void {
// compile and install
const exe = b.addExecutable(.{
.name = "browsercore",
- .root_source_file = .{ .path = "src/main.zig" },
+ .root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = mode,
});
- try common(exe, options);
+ try common(b, exe, options);
b.installArtifact(exe);
// run
@@ -76,11 +76,11 @@ pub fn build(b: *std.build.Builder) !void {
// compile and install
const shell = b.addExecutable(.{
.name = "browsercore-shell",
- .root_source_file = .{ .path = "src/main_shell.zig" },
+ .root_source_file = b.path("src/main_shell.zig"),
.target = target,
.optimize = mode,
});
- try common(shell, options);
+ try common(b, shell, options);
try jsruntime_pkgs.add_shell(shell);
// run
@@ -98,17 +98,17 @@ pub fn build(b: *std.build.Builder) !void {
// compile
const tests = b.addTest(.{
- .root_source_file = .{ .path = "src/run_tests.zig" },
- .test_runner = "src/test_runner.zig",
- .single_threaded = true,
+ .root_source_file = b.path("src/run_tests.zig"),
+ .test_runner = b.path("src/test_runner.zig"),
+ .target = target,
+ .optimize = mode,
});
- try common(tests, options);
+ try common(b, tests, options);
// add jsruntime pretty deps
- const pretty = tests.step.owner.createModule(.{
- .source_file = .{ .path = "vendor/zig-js-runtime/src/pretty.zig" },
+ tests.root_module.addAnonymousImport("pretty", .{
+ .root_source_file = b.path("vendor/zig-js-runtime/src/pretty.zig"),
});
- tests.addModule("pretty", pretty);
const run_tests = b.addRunArtifact(tests);
if (b.args) |args| {
@@ -125,12 +125,11 @@ pub fn build(b: *std.build.Builder) !void {
// compile and install
const wpt = b.addExecutable(.{
.name = "browsercore-wpt",
- .root_source_file = .{ .path = "src/main_wpt.zig" },
+ .root_source_file = b.path("src/main_wpt.zig"),
.target = target,
.optimize = mode,
});
- try common(wpt, options);
- b.installArtifact(wpt);
+ try common(b, wpt, options);
// run
const wpt_cmd = b.addRunArtifact(wpt);
@@ -147,11 +146,11 @@ pub fn build(b: *std.build.Builder) !void {
// compile and install
const get = b.addExecutable(.{
.name = "browsercore-get",
- .root_source_file = .{ .path = "src/main_get.zig" },
+ .root_source_file = b.path("src/main_get.zig"),
.target = target,
.optimize = mode,
});
- try common(get, options);
+ try common(b, get, options);
b.installArtifact(get);
// run
@@ -165,25 +164,38 @@ pub fn build(b: *std.build.Builder) !void {
}
fn common(
+ b: *std.Build,
step: *std.Build.Step.Compile,
options: jsruntime.Options,
) !void {
- try jsruntime_pkgs.add(step, options);
- linkNetSurf(step);
-
- // link mimalloc
- step.addObjectFile(.{ .path = "vendor/mimalloc/out/libmimalloc.a" });
- step.addIncludePath(.{ .path = "vendor/mimalloc/out/include" });
+ const jsruntimemod = try jsruntime_pkgs.module(
+ b,
+ options,
+ step.root_module.optimize.?,
+ step.root_module.resolved_target.?,
+ );
+ step.root_module.addImport("jsruntime", jsruntimemod);
+
+ const netsurf = moduleNetSurf(b);
+ netsurf.addImport("jsruntime", jsruntimemod);
+ step.root_module.addImport("netsurf", netsurf);
}
-fn linkNetSurf(step: *std.build.LibExeObjStep) void {
-
+fn moduleNetSurf(b: *std.Build) *std.Build.Module {
+ const mod = b.addModule("netsurf", .{
+ .root_source_file = b.path("src/netsurf/netsurf.zig"),
+ });
// iconv
- step.addObjectFile(.{ .path = "vendor/libiconv/lib/libiconv.a" });
- step.addIncludePath(.{ .path = "vendor/libiconv/include" });
+ mod.addObjectFile(b.path("vendor/libiconv/lib/libiconv.a"));
+ mod.addIncludePath(b.path("vendor/libiconv/include"));
+
+ // mimalloc
+ mod.addImport("mimalloc", moduleMimalloc(b));
// netsurf libs
const ns = "vendor/netsurf";
+ mod.addIncludePath(b.path(ns ++ "/include"));
+
const libs: [4][]const u8 = .{
"libdom",
"libhubbub",
@@ -191,8 +203,20 @@ fn linkNetSurf(step: *std.build.LibExeObjStep) void {
"libwapcaplet",
};
inline for (libs) |lib| {
- step.addObjectFile(.{ .path = ns ++ "/lib/" ++ lib ++ ".a" });
- step.addIncludePath(.{ .path = ns ++ "/" ++ lib ++ "/src" });
+ mod.addObjectFile(b.path(ns ++ "/lib/" ++ lib ++ ".a"));
+ mod.addIncludePath(b.path(ns ++ "/" ++ lib ++ "/src"));
}
- step.addIncludePath(.{ .path = ns ++ "/include" });
+
+ return mod;
+}
+
+fn moduleMimalloc(b: *std.Build) *std.Build.Module {
+ const mod = b.addModule("mimalloc", .{
+ .root_source_file = b.path("src/mimalloc/mimalloc.zig"),
+ });
+
+ mod.addObjectFile(b.path("vendor/mimalloc/out/libmimalloc.a"));
+ mod.addIncludePath(b.path("vendor/mimalloc/out/include"));
+
+ return mod;
}
diff --git a/src/async/Client.zig b/src/async/Client.zig
index 0870c7c61..d4af40185 100644
--- a/src/async/Client.zig
+++ b/src/async/Client.zig
@@ -21,9 +21,6 @@
//! Connections are opened in a thread-safe manner, but individual Requests are not.
//!
//! TLS support may be disabled via `std.options.http_disable_tls`.
-//!
-//! This file is a copy of the original std.http.Client with little changes to
-//! handle non-blocking I/O with the jsruntime.Loop.
const std = @import("std");
const builtin = @import("builtin");
@@ -45,9 +42,7 @@ const tcp = @import("tcp.zig");
pub const disable_tls = std.options.http_disable_tls;
-/// Allocator used for all allocations made by the client.
-///
-/// This allocator must be thread-safe.
+/// Used for all client allocations. Must be thread-safe.
allocator: Allocator,
// std.net.Stream implementation using jsruntime Loop
@@ -63,14 +58,25 @@ next_https_rescan_certs: bool = true,
/// The pool of connections that can be reused (and currently in use).
connection_pool: ConnectionPool = .{},
-/// This is the proxy that will handle http:// connections. It *must not* be modified when the client has any active connections.
-http_proxy: ?Proxy = null,
-
-/// This is the proxy that will handle https:// connections. It *must not* be modified when the client has any active connections.
-https_proxy: ?Proxy = null,
+/// If populated, all http traffic travels through this third party.
+/// This field cannot be modified while the client has active connections.
+/// Pointer to externally-owned memory.
+http_proxy: ?*Proxy = null,
+/// If populated, all https traffic travels through this third party.
+/// This field cannot be modified while the client has active connections.
+/// Pointer to externally-owned memory.
+https_proxy: ?*Proxy = null,
/// A set of linked lists of connections that can be reused.
pub const ConnectionPool = struct {
+ mutex: std.Thread.Mutex = .{},
+ /// Open connections that are currently in use.
+ used: Queue = .{},
+ /// Open connections that are not currently in use.
+ free: Queue = .{},
+ free_len: usize = 0,
+ free_size: usize = 32,
+
/// The criteria for a connection to be considered a match.
pub const Criteria = struct {
host: []const u8,
@@ -81,14 +87,6 @@ pub const ConnectionPool = struct {
const Queue = std.DoublyLinkedList(Connection);
pub const Node = Queue.Node;
- mutex: std.Thread.Mutex = .{},
- /// Open connections that are currently in use.
- used: Queue = .{},
- /// Open connections that are not currently in use.
- free: Queue = .{},
- free_len: usize = 0,
- free_size: usize = 32,
-
/// Finds and acquires a connection from the connection pool matching the criteria. This function is threadsafe.
/// If no connection is found, null is returned.
pub fn findConnection(pool: *ConnectionPool, criteria: Criteria) ?*Connection {
@@ -135,7 +133,7 @@ pub const ConnectionPool = struct {
pool.mutex.lock();
defer pool.mutex.unlock();
- const node = @fieldParentPtr(Node, "data", connection);
+ const node: *Node = @fieldParentPtr("data", connection);
pool.used.remove(node);
@@ -217,11 +215,6 @@ pub const ConnectionPool = struct {
/// An interface to either a plain or TLS connection.
pub const Connection = struct {
- pub const buffer_size = std.crypto.tls.max_ciphertext_record_len;
- const BufferSize = std.math.IntFittingRange(0, buffer_size);
-
- pub const Protocol = enum { plain, tls };
-
stream: Stream,
/// undefined unless protocol is tls.
tls_client: if (!disable_tls) *std.crypto.tls.Client else void,
@@ -247,7 +240,12 @@ pub const Connection = struct {
read_buf: [buffer_size]u8 = undefined,
write_buf: [buffer_size]u8 = undefined,
- pub fn readvDirectTls(conn: *Connection, buffers: []std.os.iovec) ReadError!usize {
+ pub const buffer_size = std.crypto.tls.max_ciphertext_record_len;
+ const BufferSize = std.math.IntFittingRange(0, buffer_size);
+
+ pub const Protocol = enum { plain, tls };
+
+ pub fn readvDirectTls(conn: *Connection, buffers: []std.posix.iovec) ReadError!usize {
return conn.tls_client.readv(conn.stream, buffers) catch |err| {
// https://github.com/ziglang/zig/issues/2473
if (mem.startsWith(u8, @errorName(err), "TlsAlert")) return error.TlsAlert;
@@ -261,7 +259,7 @@ pub const Connection = struct {
};
}
- pub fn readvDirect(conn: *Connection, buffers: []std.os.iovec) ReadError!usize {
+ pub fn readvDirect(conn: *Connection, buffers: []std.posix.iovec) ReadError!usize {
if (conn.protocol == .tls) {
if (disable_tls) unreachable;
@@ -279,7 +277,7 @@ pub const Connection = struct {
pub fn fill(conn: *Connection) ReadError!void {
if (conn.read_end != conn.read_start) return;
- var iovecs = [1]std.os.iovec{
+ var iovecs = [1]std.posix.iovec{
.{ .iov_base = &conn.read_buf, .iov_len = conn.read_buf.len },
};
const nread = try conn.readvDirect(&iovecs);
@@ -315,7 +313,7 @@ pub const Connection = struct {
return available_read;
}
- var iovecs = [2]std.os.iovec{
+ var iovecs = [2]std.posix.iovec{
.{ .iov_base = buffer.ptr, .iov_len = buffer.len },
.{ .iov_base = &conn.read_buf, .iov_len = conn.read_buf.len },
};
@@ -367,7 +365,7 @@ pub const Connection = struct {
/// Writes the given buffer to the connection.
pub fn write(conn: *Connection, buffer: []const u8) WriteError!usize {
- if (conn.write_end + buffer.len > conn.write_buf.len) {
+ if (conn.write_buf.len - conn.write_end < buffer.len) {
try conn.flush();
if (buffer.len > conn.write_buf.len) {
@@ -382,6 +380,13 @@ pub const Connection = struct {
return buffer.len;
}
+ /// Returns a buffer to be filled with exactly len bytes to write to the connection.
+ pub fn allocWriteBuffer(conn: *Connection, len: BufferSize) WriteError![]u8 {
+ if (conn.write_buf.len - conn.write_end < len) try conn.flush();
+ defer conn.write_end += len;
+ return conn.write_buf[conn.write_end..][0..len];
+ }
+
/// Flushes the write buffer to the connection.
pub fn flush(conn: *Connection) WriteError!void {
if (conn.write_end == 0) return;
@@ -425,33 +430,65 @@ pub const RequestTransfer = union(enum) {
/// The decompressor for response messages.
pub const Compression = union(enum) {
- pub const DeflateDecompressor = std.compress.zlib.DecompressStream(Request.TransferReader);
- pub const GzipDecompressor = std.compress.gzip.Decompress(Request.TransferReader);
- pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Request.TransferReader, .{});
+ pub const DeflateDecompressor = std.compress.zlib.Decompressor(Request.TransferReader);
+ pub const GzipDecompressor = std.compress.gzip.Decompressor(Request.TransferReader);
+ // https://github.com/ziglang/zig/issues/18937
+ //pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Request.TransferReader, .{});
deflate: DeflateDecompressor,
gzip: GzipDecompressor,
- zstd: ZstdDecompressor,
+ // https://github.com/ziglang/zig/issues/18937
+ //zstd: ZstdDecompressor,
none: void,
};
/// A HTTP response originating from a server.
pub const Response = struct {
- pub const ParseError = Allocator.Error || error{
+ version: http.Version,
+ status: http.Status,
+ reason: []const u8,
+
+ /// Points into the user-provided `server_header_buffer`.
+ location: ?[]const u8 = null,
+ /// Points into the user-provided `server_header_buffer`.
+ content_type: ?[]const u8 = null,
+ /// Points into the user-provided `server_header_buffer`.
+ content_disposition: ?[]const u8 = null,
+
+ keep_alive: bool,
+
+ /// If present, the number of bytes in the response body.
+ content_length: ?u64 = null,
+
+ /// If present, the transfer encoding of the response body, otherwise none.
+ transfer_encoding: http.TransferEncoding = .none,
+
+ /// If present, the compression of the response body, otherwise identity (no compression).
+ transfer_compression: http.ContentEncoding = .identity,
+
+ parser: proto.HeadersParser,
+ compression: Compression = .none,
+
+ /// Whether the response body should be skipped. Any data read from the
+ /// response body will be discarded.
+ skip: bool = false,
+
+ pub const ParseError = error{
HttpHeadersInvalid,
HttpHeaderContinuationsUnsupported,
HttpTransferEncodingUnsupported,
HttpConnectionHeaderUnsupported,
InvalidContentLength,
- CompressionNotSupported,
+ CompressionUnsupported,
};
- pub fn parse(res: *Response, bytes: []const u8, trailing: bool) ParseError!void {
- var it = mem.tokenizeAny(u8, bytes, "\r\n");
+ pub fn parse(res: *Response, bytes: []const u8) ParseError!void {
+ var it = mem.splitSequence(u8, bytes, "\r\n");
- const first_line = it.next() orelse return error.HttpHeadersInvalid;
- if (first_line.len < 12)
+ const first_line = it.next().?;
+ if (first_line.len < 12) {
return error.HttpHeadersInvalid;
+ }
const version: http.Version = switch (int64(first_line[0..8])) {
int64("HTTP/1.0") => .@"HTTP/1.0",
@@ -465,25 +502,32 @@ pub const Response = struct {
res.version = version;
res.status = status;
res.reason = reason;
-
- res.headers.clearRetainingCapacity();
+ res.keep_alive = switch (version) {
+ .@"HTTP/1.0" => false,
+ .@"HTTP/1.1" => true,
+ };
while (it.next()) |line| {
- if (line.len == 0) return error.HttpHeadersInvalid;
+ if (line.len == 0) return;
switch (line[0]) {
' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
else => {},
}
- var line_it = mem.tokenizeAny(u8, line, ": ");
- const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
- const header_value = line_it.rest();
-
- try res.headers.append(header_name, header_value);
-
- if (trailing) continue;
-
- if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+ var line_it = mem.splitScalar(u8, line, ':');
+ const header_name = line_it.next().?;
+ const header_value = mem.trim(u8, line_it.rest(), " \t");
+ if (header_name.len == 0) return error.HttpHeadersInvalid;
+
+ if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+ res.keep_alive = !std.ascii.eqlIgnoreCase(header_value, "close");
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-type")) {
+ res.content_type = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "location")) {
+ res.location = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-disposition")) {
+ res.content_disposition = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
// Transfer-Encoding: second, first
// Transfer-Encoding: deflate, chunked
var iter = mem.splitBackwardsScalar(u8, header_value, ',');
@@ -529,6 +573,44 @@ pub const Response = struct {
}
}
}
+ return error.HttpHeadersInvalid; // missing empty line
+ }
+
+ test parse {
+ const response_bytes = "HTTP/1.1 200 OK\r\n" ++
+ "LOcation:url\r\n" ++
+ "content-tYpe: text/plain\r\n" ++
+ "content-disposition:attachment; filename=example.txt \r\n" ++
+ "content-Length:10\r\n" ++
+ "TRansfer-encoding:\tdeflate, chunked \r\n" ++
+ "connectioN:\t keep-alive \r\n\r\n";
+
+ var header_buffer: [1024]u8 = undefined;
+ var res = Response{
+ .status = undefined,
+ .reason = undefined,
+ .version = undefined,
+ .keep_alive = false,
+ .parser = proto.HeadersParser.init(&header_buffer),
+ };
+
+ @memcpy(header_buffer[0..response_bytes.len], response_bytes);
+ res.parser.header_bytes_len = response_bytes.len;
+
+ try res.parse(response_bytes);
+
+ try testing.expectEqual(.@"HTTP/1.1", res.version);
+ try testing.expectEqualStrings("OK", res.reason);
+ try testing.expectEqual(.ok, res.status);
+
+ try testing.expectEqualStrings("url", res.location.?);
+ try testing.expectEqualStrings("text/plain", res.content_type.?);
+ try testing.expectEqualStrings("attachment; filename=example.txt", res.content_disposition.?);
+
+ try testing.expectEqual(true, res.keep_alive);
+ try testing.expectEqual(10, res.content_length.?);
+ try testing.expectEqual(.chunked, res.transfer_encoding);
+ try testing.expectEqual(.deflate, res.transfer_compression);
}
inline fn int64(array: *const [8]u8) u64 {
@@ -552,60 +634,86 @@ pub const Response = struct {
try expectEqual(@as(u10, 999), parseInt3("999"));
}
- /// The HTTP version this response is using.
- version: http.Version,
-
- /// The status code of the response.
- status: http.Status,
-
- /// The reason phrase of the response.
- reason: []const u8,
-
- /// If present, the number of bytes in the response body.
- content_length: ?u64 = null,
-
- /// If present, the transfer encoding of the response body, otherwise none.
- transfer_encoding: http.TransferEncoding = .none,
+ pub fn iterateHeaders(r: Response) http.HeaderIterator {
+ return http.HeaderIterator.init(r.parser.get());
+ }
- /// If present, the compression of the response body, otherwise identity (no compression).
- transfer_compression: http.ContentEncoding = .identity,
+ test iterateHeaders {
+ const response_bytes = "HTTP/1.1 200 OK\r\n" ++
+ "LOcation:url\r\n" ++
+ "content-tYpe: text/plain\r\n" ++
+ "content-disposition:attachment; filename=example.txt \r\n" ++
+ "content-Length:10\r\n" ++
+ "TRansfer-encoding:\tdeflate, chunked \r\n" ++
+ "connectioN:\t keep-alive \r\n\r\n";
+
+ var header_buffer: [1024]u8 = undefined;
+ var res = Response{
+ .status = undefined,
+ .reason = undefined,
+ .version = undefined,
+ .keep_alive = false,
+ .parser = proto.HeadersParser.init(&header_buffer),
+ };
- /// The headers received from the server.
- headers: http.Headers,
- parser: proto.HeadersParser,
- compression: Compression = .none,
+ @memcpy(header_buffer[0..response_bytes.len], response_bytes);
+ res.parser.header_bytes_len = response_bytes.len;
- /// Whether the response body should be skipped. Any data read from the response body will be discarded.
- skip: bool = false,
+ var it = res.iterateHeaders();
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("LOcation", header.name);
+ try testing.expectEqualStrings("url", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("content-tYpe", header.name);
+ try testing.expectEqualStrings("text/plain", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("content-disposition", header.name);
+ try testing.expectEqualStrings("attachment; filename=example.txt", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("content-Length", header.name);
+ try testing.expectEqualStrings("10", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("TRansfer-encoding", header.name);
+ try testing.expectEqualStrings("deflate, chunked", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ {
+ const header = it.next().?;
+ try testing.expectEqualStrings("connectioN", header.name);
+ try testing.expectEqualStrings("keep-alive", header.value);
+ try testing.expect(!it.is_trailer);
+ }
+ try testing.expectEqual(null, it.next());
+ }
};
/// A HTTP request that has been sent.
///
/// Order of operations: open -> send[ -> write -> finish] -> wait -> read
pub const Request = struct {
- /// The uri that this request is being sent to.
uri: Uri,
-
- /// The client that this request was created from.
client: *Client,
-
- /// Underlying connection to the server. This is null when the connection is released.
+ /// This is null when the connection is released.
connection: ?*Connection,
+ keep_alive: bool,
method: http.Method,
version: http.Version = .@"HTTP/1.1",
-
- /// The list of HTTP request headers.
- headers: http.Headers,
-
- /// The transfer encoding of the request body.
- transfer_encoding: RequestTransfer = .none,
-
- /// The redirect quota left for this request.
- redirects_left: u32,
-
- /// Whether the request should follow redirects.
- handle_redirects: bool,
+ transfer_encoding: RequestTransfer,
+ redirect_behavior: RedirectBehavior,
/// Whether the request should handle a 100-continue response before sending the request body.
handle_continue: bool,
@@ -615,25 +723,60 @@ pub const Request = struct {
/// This field is undefined until `wait` is called.
response: Response,
- /// Used as a allocator for resolving redirects locations.
- arena: std.heap.ArenaAllocator,
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Headers,
+
+ /// These headers are kept including when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header,
+
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header,
+
+ pub const Headers = struct {
+ host: Value = .default,
+ authorization: Value = .default,
+ user_agent: Value = .default,
+ connection: Value = .default,
+ accept_encoding: Value = .default,
+ content_type: Value = .default,
+
+ pub const Value = union(enum) {
+ default,
+ omit,
+ override: []const u8,
+ };
+ };
- /// Frees all resources associated with the request.
- pub fn deinit(req: *Request) void {
- switch (req.response.compression) {
- .none => {},
- .deflate => |*deflate| deflate.deinit(),
- .gzip => |*gzip| gzip.deinit(),
- .zstd => |*zstd| zstd.deinit(),
+ /// Any value other than `not_allowed` or `unhandled` means that integer represents
+ /// how many remaining redirects are allowed.
+ pub const RedirectBehavior = enum(u16) {
+ /// The next redirect will cause an error.
+ not_allowed = 0,
+ /// Redirects are passed to the client to analyze the redirect response
+ /// directly.
+ unhandled = std.math.maxInt(u16),
+ _,
+
+ pub fn subtractOne(rb: *RedirectBehavior) void {
+ switch (rb.*) {
+ .not_allowed => unreachable,
+ .unhandled => unreachable,
+ _ => rb.* = @enumFromInt(@intFromEnum(rb.*) - 1),
+ }
}
- req.headers.deinit();
- req.response.headers.deinit();
-
- if (req.response.parser.header_bytes_owned) {
- req.response.parser.header_bytes.deinit(req.client.allocator);
+ pub fn remaining(rb: RedirectBehavior) u16 {
+ assert(rb != .unhandled);
+ return @intFromEnum(rb);
}
+ };
+ /// Frees all resources associated with the request.
+ pub fn deinit(req: *Request) void {
if (req.connection) |connection| {
if (!req.response.parser.done) {
// If the response wasn't fully read, then we need to close the connection.
@@ -641,62 +784,74 @@ pub const Request = struct {
}
req.client.connection_pool.release(req.client.allocator, connection);
}
-
- req.arena.deinit();
req.* = undefined;
}
- // This function must deallocate all resources associated with the request, or keep those which will be used
- // This needs to be kept in sync with deinit and request
+ // This function must deallocate all resources associated with the request,
+ // or keep those which will be used.
+ // This needs to be kept in sync with deinit and request.
fn redirect(req: *Request, uri: Uri) !void {
assert(req.response.parser.done);
- switch (req.response.compression) {
- .none => {},
- .deflate => |*deflate| deflate.deinit(),
- .gzip => |*gzip| gzip.deinit(),
- .zstd => |*zstd| zstd.deinit(),
- }
-
req.client.connection_pool.release(req.client.allocator, req.connection.?);
req.connection = null;
- const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUrlScheme;
+ var server_header = std.heap.FixedBufferAllocator.init(req.response.parser.header_bytes_buffer);
+ defer req.response.parser.header_bytes_buffer = server_header.buffer[server_header.end_index..];
+ const protocol, const valid_uri = try validateUri(uri, server_header.allocator());
+
+ const new_host = valid_uri.host.?.raw;
+ const prev_host = req.uri.host.?.raw;
+ const keep_privileged_headers =
+ std.ascii.eqlIgnoreCase(valid_uri.scheme, req.uri.scheme) and
+ std.ascii.endsWithIgnoreCase(new_host, prev_host) and
+ (new_host.len == prev_host.len or new_host[new_host.len - prev_host.len - 1] == '.');
+ if (!keep_privileged_headers) {
+ // When redirecting to a different domain, strip privileged headers.
+ req.privileged_headers = &.{};
+ }
- const port: u16 = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- };
+ if (switch (req.response.status) {
+ .see_other => true,
+ .moved_permanently, .found => req.method == .POST,
+ else => false,
+ }) {
+ // A redirect to a GET must change the method and remove the body.
+ req.method = .GET;
+ req.transfer_encoding = .none;
+ req.headers.content_type = .omit;
+ }
- const host = uri.host orelse return error.UriMissingHost;
+ if (req.transfer_encoding != .none) {
+ // The request body has already been sent. The request is
+ // still in a valid state, but the redirect must be handled
+ // manually.
+ return error.RedirectRequiresResend;
+ }
- req.uri = uri;
- req.connection = try req.client.connect(host, port, protocol);
- req.redirects_left -= 1;
- req.response.headers.clearRetainingCapacity();
+ req.uri = valid_uri;
+ req.connection = try req.client.connect(new_host, uriPort(valid_uri, protocol), protocol);
+ req.redirect_behavior.subtractOne();
req.response.parser.reset();
req.response = .{
+ .version = undefined,
.status = undefined,
.reason = undefined,
- .version = undefined,
- .headers = req.response.headers,
+ .keep_alive = undefined,
.parser = req.response.parser,
};
}
pub const SendError = Connection.WriteError || error{ InvalidContentLength, UnsupportedTransferEncoding };
- pub const SendOptions = struct {
- /// Specifies that the uri should be used as is. You guarantee that the uri is already escaped.
- raw_uri: bool = false,
- };
-
/// Send the HTTP request headers to the server.
- pub fn send(req: *Request, options: SendOptions) SendError!void {
- if (!req.method.requestHasBody() and req.transfer_encoding != .none) return error.UnsupportedTransferEncoding;
+ pub fn send(req: *Request) SendError!void {
+ if (!req.method.requestHasBody() and req.transfer_encoding != .none)
+ return error.UnsupportedTransferEncoding;
- const w = req.connection.?.writer();
+ const connection = req.connection.?;
+ const w = connection.writer();
try req.method.write(w);
try w.writeByte(' ');
@@ -705,98 +860,104 @@ pub const Request = struct {
try req.uri.writeToStream(.{ .authority = true }, w);
} else {
try req.uri.writeToStream(.{
- .scheme = req.connection.?.proxied,
- .authentication = req.connection.?.proxied,
- .authority = req.connection.?.proxied,
+ .scheme = connection.proxied,
+ .authentication = connection.proxied,
+ .authority = connection.proxied,
.path = true,
.query = true,
- .raw = options.raw_uri,
}, w);
}
try w.writeByte(' ');
try w.writeAll(@tagName(req.version));
try w.writeAll("\r\n");
- if (!req.headers.contains("host")) {
- try w.writeAll("Host: ");
+ if (try emitOverridableHeader("host: ", req.headers.host, w)) {
+ try w.writeAll("host: ");
try req.uri.writeToStream(.{ .authority = true }, w);
try w.writeAll("\r\n");
}
- if (!req.headers.contains("user-agent")) {
- try w.writeAll("User-Agent: zig/");
+ if (try emitOverridableHeader("authorization: ", req.headers.authorization, w)) {
+ if (req.uri.user != null or req.uri.password != null) {
+ try w.writeAll("authorization: ");
+ const authorization = try connection.allocWriteBuffer(
+ @intCast(basic_authorization.valueLengthFromUri(req.uri)),
+ );
+ assert(basic_authorization.value(req.uri, authorization).len == authorization.len);
+ try w.writeAll("\r\n");
+ }
+ }
+
+ if (try emitOverridableHeader("user-agent: ", req.headers.user_agent, w)) {
+ try w.writeAll("user-agent: zig/");
try w.writeAll(builtin.zig_version_string);
try w.writeAll(" (std.http)\r\n");
}
- if (!req.headers.contains("connection")) {
- try w.writeAll("Connection: keep-alive\r\n");
+ if (try emitOverridableHeader("connection: ", req.headers.connection, w)) {
+ if (req.keep_alive) {
+ try w.writeAll("connection: keep-alive\r\n");
+ } else {
+ try w.writeAll("connection: close\r\n");
+ }
}
- if (!req.headers.contains("accept-encoding")) {
- try w.writeAll("Accept-Encoding: gzip, deflate, zstd\r\n");
+ if (try emitOverridableHeader("accept-encoding: ", req.headers.accept_encoding, w)) {
+ // https://github.com/ziglang/zig/issues/18937
+ //try w.writeAll("accept-encoding: gzip, deflate, zstd\r\n");
+ try w.writeAll("accept-encoding: gzip, deflate\r\n");
}
- if (!req.headers.contains("te")) {
- try w.writeAll("TE: gzip, deflate, trailers\r\n");
+ switch (req.transfer_encoding) {
+ .chunked => try w.writeAll("transfer-encoding: chunked\r\n"),
+ .content_length => |len| try w.print("content-length: {d}\r\n", .{len}),
+ .none => {},
}
- const has_transfer_encoding = req.headers.contains("transfer-encoding");
- const has_content_length = req.headers.contains("content-length");
-
- if (!has_transfer_encoding and !has_content_length) {
- switch (req.transfer_encoding) {
- .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
- .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
- .none => {},
- }
- } else {
- if (has_transfer_encoding) {
- const transfer_encoding = req.headers.getFirstValue("transfer-encoding").?;
- if (std.mem.eql(u8, transfer_encoding, "chunked")) {
- req.transfer_encoding = .chunked;
- } else {
- return error.UnsupportedTransferEncoding;
- }
- } else if (has_content_length) {
- const content_length = std.fmt.parseInt(u64, req.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
-
- req.transfer_encoding = .{ .content_length = content_length };
- } else {
- req.transfer_encoding = .none;
- }
+ if (try emitOverridableHeader("content-type: ", req.headers.content_type, w)) {
+ // The default is to omit content-type if not provided because
+ // "application/octet-stream" is redundant.
}
- for (req.headers.list.items) |entry| {
- if (entry.value.len == 0) continue;
+ for (req.extra_headers) |header| {
+ assert(header.name.len != 0);
- try w.writeAll(entry.name);
+ try w.writeAll(header.name);
try w.writeAll(": ");
- try w.writeAll(entry.value);
+ try w.writeAll(header.value);
try w.writeAll("\r\n");
}
- if (req.connection.?.proxied) {
- const proxy_headers: ?http.Headers = switch (req.connection.?.protocol) {
- .plain => if (req.client.http_proxy) |proxy| proxy.headers else null,
- .tls => if (req.client.https_proxy) |proxy| proxy.headers else null,
- };
-
- if (proxy_headers) |headers| {
- for (headers.list.items) |entry| {
- if (entry.value.len == 0) continue;
+ if (connection.proxied) proxy: {
+ const proxy = switch (connection.protocol) {
+ .plain => req.client.http_proxy,
+ .tls => req.client.https_proxy,
+ } orelse break :proxy;
- try w.writeAll(entry.name);
- try w.writeAll(": ");
- try w.writeAll(entry.value);
- try w.writeAll("\r\n");
- }
- }
+ const authorization = proxy.authorization orelse break :proxy;
+ try w.writeAll("proxy-authorization: ");
+ try w.writeAll(authorization);
+ try w.writeAll("\r\n");
}
try w.writeAll("\r\n");
- try req.connection.?.flush();
+ try connection.flush();
+ }
+
+ /// Returns true if the default behavior is required, otherwise handles
+ /// writing (or not writing) the header.
+ fn emitOverridableHeader(prefix: []const u8, v: Headers.Value, w: anytype) !bool {
+ switch (v) {
+ .default => return true,
+ .omit => return false,
+ .override => |x| {
+ try w.writeAll(prefix);
+ try w.writeAll(x);
+ try w.writeAll("\r\n");
+ return false;
+ },
+ }
}
const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
@@ -820,145 +981,136 @@ pub const Request = struct {
return index;
}
- pub const WaitError = RequestError || SendError || TransferReadError || proto.HeadersParser.CheckCompleteHeadError || Response.ParseError || Uri.ParseError || error{ TooManyHttpRedirects, RedirectRequiresResend, HttpRedirectMissingLocation, CompressionInitializationFailed, CompressionNotSupported };
+ pub const WaitError = RequestError || SendError || TransferReadError ||
+ proto.HeadersParser.CheckCompleteHeadError || Response.ParseError ||
+ error{ // TODO: file zig fmt issue for this bad indentation
+ TooManyHttpRedirects,
+ RedirectRequiresResend,
+ HttpRedirectLocationMissing,
+ HttpRedirectLocationInvalid,
+ CompressionInitializationFailed,
+ CompressionUnsupported,
+ };
/// Waits for a response from the server and parses any headers that are sent.
/// This function will block until the final response is received.
///
- /// If `handle_redirects` is true and the request has no payload, then this function will automatically follow
- /// redirects. If a request payload is present, then this function will error with error.RedirectRequiresResend.
+ /// If handling redirects and the request has no payload, then this
+ /// function will automatically follow redirects. If a request payload is
+ /// present, then this function will error with
+ /// error.RedirectRequiresResend.
///
- /// Must be called after `send` and, if any data was written to the request body, then also after `finish`.
+ /// Must be called after `send` and, if any data was written to the request
+ /// body, then also after `finish`.
pub fn wait(req: *Request) WaitError!void {
- while (true) { // handle redirects
+ while (true) {
+ // This while loop is for handling redirects, which means the request's
+ // connection may be different than the previous iteration. However, it
+ // is still guaranteed to be non-null with each iteration of this loop.
+ const connection = req.connection.?;
+
while (true) { // read headers
- try req.connection.?.fill();
+ try connection.fill();
- const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.peek());
- req.connection.?.drop(@intCast(nchecked));
+ const nchecked = try req.response.parser.checkCompleteHead(connection.peek());
+ connection.drop(@intCast(nchecked));
if (req.response.parser.state.isContent()) break;
}
- try req.response.parse(req.response.parser.header_bytes.items, false);
+ try req.response.parse(req.response.parser.get());
if (req.response.status == .@"continue") {
- req.response.parser.done = true; // we're done parsing the continue response, reset to prepare for the real response
+ // We're done parsing the continue response; reset to prepare
+ // for the real response.
+ req.response.parser.done = true;
req.response.parser.reset();
if (req.handle_continue)
continue;
- return; // we're not handling the 100-continue, return to the caller
+ return; // we're not handling the 100-continue
}
// we're switching protocols, so this connection is no longer doing http
if (req.method == .CONNECT and req.response.status.class() == .success) {
- req.connection.?.closing = false;
+ connection.closing = false;
req.response.parser.done = true;
-
- return; // the connection is not HTTP past this point, return to the caller
+ return; // the connection is not HTTP past this point
}
- // we default to using keep-alive if not provided in the client if the server asks for it
- const req_connection = req.headers.getFirstValue("connection");
- const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
+ connection.closing = !req.response.keep_alive or !req.keep_alive;
- const res_connection = req.response.headers.getFirstValue("connection");
- const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?);
- if (res_keepalive and (req_keepalive or req_connection == null)) {
- req.connection.?.closing = false;
- } else {
- req.connection.?.closing = true;
- }
-
- // Any response to a HEAD request and any response with a 1xx (Informational), 204 (No Content), or 304 (Not Modified)
- // status code is always terminated by the first empty line after the header fields, regardless of the header fields
- // present in the message
- if (req.method == .HEAD or req.response.status.class() == .informational or req.response.status == .no_content or req.response.status == .not_modified) {
+ // Any response to a HEAD request and any response with a 1xx
+ // (Informational), 204 (No Content), or 304 (Not Modified) status
+ // code is always terminated by the first empty line after the
+ // header fields, regardless of the header fields present in the
+ // message.
+ if (req.method == .HEAD or req.response.status.class() == .informational or
+ req.response.status == .no_content or req.response.status == .not_modified)
+ {
req.response.parser.done = true;
-
- return; // the response is empty, no further setup or redirection is necessary
+ return; // The response is empty; no further setup or redirection is necessary.
}
- if (req.response.transfer_encoding != .none) {
- switch (req.response.transfer_encoding) {
- .none => unreachable,
- .chunked => {
- req.response.parser.next_chunk_length = 0;
- req.response.parser.state = .chunk_head_size;
- },
- }
- } else if (req.response.content_length) |cl| {
- req.response.parser.next_chunk_length = cl;
+ switch (req.response.transfer_encoding) {
+ .none => {
+ if (req.response.content_length) |cl| {
+ req.response.parser.next_chunk_length = cl;
- if (cl == 0) req.response.parser.done = true;
- } else {
- // read until the connection is closed
- req.response.parser.next_chunk_length = std.math.maxInt(u64);
+ if (cl == 0) req.response.parser.done = true;
+ } else {
+ // read until the connection is closed
+ req.response.parser.next_chunk_length = std.math.maxInt(u64);
+ }
+ },
+ .chunked => {
+ req.response.parser.next_chunk_length = 0;
+ req.response.parser.state = .chunk_head_size;
+ },
}
- if (req.response.status.class() == .redirect and req.handle_redirects) {
+ if (req.response.status.class() == .redirect and req.redirect_behavior != .unhandled) {
+ // skip the body of the redirect response, this will at least
+ // leave the connection in a known good state.
req.response.skip = true;
-
- // skip the body of the redirect response, this will at least leave the connection in a known good state.
- const empty = @as([*]u8, undefined)[0..0];
- assert(try req.transferRead(empty) == 0); // we're skipping, no buffer is necessary
-
- if (req.redirects_left == 0) return error.TooManyHttpRedirects;
-
- const location = req.response.headers.getFirstValue("location") orelse
- return error.HttpRedirectMissingLocation;
-
- const arena = req.arena.allocator();
-
- const location_duped = try arena.dupe(u8, location);
-
- const new_url = Uri.parse(location_duped) catch try Uri.parseWithoutScheme(location_duped);
- const resolved_url = try req.uri.resolve(new_url, false, arena);
-
- // is the redirect location on the same domain, or a subdomain of the original request?
- const is_same_domain_or_subdomain = std.ascii.endsWithIgnoreCase(resolved_url.host.?, req.uri.host.?) and (resolved_url.host.?.len == req.uri.host.?.len or resolved_url.host.?[resolved_url.host.?.len - req.uri.host.?.len - 1] == '.');
-
- if (resolved_url.host == null or !is_same_domain_or_subdomain or !std.ascii.eqlIgnoreCase(resolved_url.scheme, req.uri.scheme)) {
- // we're redirecting to a different domain, strip privileged headers like cookies
- _ = req.headers.delete("authorization");
- _ = req.headers.delete("www-authenticate");
- _ = req.headers.delete("cookie");
- _ = req.headers.delete("cookie2");
- }
-
- if (req.response.status == .see_other or ((req.response.status == .moved_permanently or req.response.status == .found) and req.method == .POST)) {
- // we're redirecting to a GET, so we need to change the method and remove the body
- req.method = .GET;
- req.transfer_encoding = .none;
- _ = req.headers.delete("transfer-encoding");
- _ = req.headers.delete("content-length");
- _ = req.headers.delete("content-type");
- }
-
- if (req.transfer_encoding != .none) {
- return error.RedirectRequiresResend; // The request body has already been sent. The request is still in a valid state, but the redirect must be handled manually.
- }
-
- try req.redirect(resolved_url);
-
- try req.send(.{});
+ assert(try req.transferRead(&.{}) == 0); // we're skipping, no buffer is necessary
+
+ if (req.redirect_behavior == .not_allowed) return error.TooManyHttpRedirects;
+
+ const location = req.response.location orelse
+ return error.HttpRedirectLocationMissing;
+
+ // This mutates the beginning of header_bytes_buffer and uses that
+ // for the backing memory of the returned Uri.
+ try req.redirect(req.uri.resolve_inplace(
+ location,
+ &req.response.parser.header_bytes_buffer,
+ ) catch |err| switch (err) {
+ error.UnexpectedCharacter,
+ error.InvalidFormat,
+ error.InvalidPort,
+ => return error.HttpRedirectLocationInvalid,
+ error.NoSpaceLeft => return error.HttpHeadersOversize,
+ });
+ try req.send();
} else {
req.response.skip = false;
if (!req.response.parser.done) {
switch (req.response.transfer_compression) {
.identity => req.response.compression = .none,
- .compress, .@"x-compress" => return error.CompressionNotSupported,
+ .compress, .@"x-compress" => return error.CompressionUnsupported,
.deflate => req.response.compression = .{
- .deflate = std.compress.zlib.decompressStream(req.client.allocator, req.transferReader()) catch return error.CompressionInitializationFailed,
+ .deflate = std.compress.zlib.decompressor(req.transferReader()),
},
.gzip, .@"x-gzip" => req.response.compression = .{
- .gzip = std.compress.gzip.decompress(req.client.allocator, req.transferReader()) catch return error.CompressionInitializationFailed,
- },
- .zstd => req.response.compression = .{
- .zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),
+ .gzip = std.compress.gzip.decompressor(req.transferReader()),
},
+ // https://github.com/ziglang/zig/issues/18937
+ //.zstd => req.response.compression = .{
+ // .zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),
+ //},
+ .zstd => return error.CompressionUnsupported,
}
}
@@ -967,7 +1119,8 @@ pub const Request = struct {
}
}
- pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError || error{ DecompressionFailure, InvalidTrailers };
+ pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError ||
+ error{ DecompressionFailure, InvalidTrailers };
pub const Reader = std.io.Reader(*Request, ReadError, read);
@@ -980,28 +1133,20 @@ pub const Request = struct {
const out_index = switch (req.response.compression) {
.deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
.gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
- .zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
+ // https://github.com/ziglang/zig/issues/18937
+ //.zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
else => try req.transferRead(buffer),
};
+ if (out_index > 0) return out_index;
- if (out_index == 0) {
- const has_trail = !req.response.parser.state.isContent();
-
- while (!req.response.parser.state.isContent()) { // read trailing headers
- try req.connection.?.fill();
+ while (!req.response.parser.state.isContent()) { // read trailing headers
+ try req.connection.?.fill();
- const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.peek());
- req.connection.?.drop(@intCast(nchecked));
- }
-
- if (has_trail) {
- // The response headers before the trailers are already guaranteed to be valid, so they will always be parsed again and cannot return an error.
- // This will *only* fail for a malformed trailer.
- req.response.parse(req.response.parser.header_bytes.items, true) catch return error.InvalidTrailers;
- }
+ const nchecked = try req.response.parser.checkCompleteHead(req.connection.?.peek());
+ req.connection.?.drop(@intCast(nchecked));
}
- return out_index;
+ return 0;
}
/// Reads data from the response body. Must be called after `wait`.
@@ -1028,9 +1173,11 @@ pub const Request = struct {
pub fn write(req: *Request, bytes: []const u8) WriteError!usize {
switch (req.transfer_encoding) {
.chunked => {
- try req.connection.?.writer().print("{x}\r\n", .{bytes.len});
- try req.connection.?.writer().writeAll(bytes);
- try req.connection.?.writer().writeAll("\r\n");
+ if (bytes.len > 0) {
+ try req.connection.?.writer().print("{x}\r\n", .{bytes.len});
+ try req.connection.?.writer().writeAll(bytes);
+ try req.connection.?.writer().writeAll("\r\n");
+ }
return bytes.len;
},
@@ -1069,16 +1216,12 @@ pub const Request = struct {
}
};
-/// A HTTP proxy server.
pub const Proxy = struct {
- allocator: Allocator,
- headers: http.Headers,
-
protocol: Connection.Protocol,
host: []const u8,
+ authorization: ?[]const u8,
port: u16,
-
- supports_connect: bool = true,
+ supports_connect: bool,
};
/// Release all associated resources with the client.
@@ -1090,133 +1233,103 @@ pub fn deinit(client: *Client) void {
client.connection_pool.deinit(client.allocator);
- if (client.http_proxy) |*proxy| {
- proxy.allocator.free(proxy.host);
- proxy.headers.deinit();
- }
-
- if (client.https_proxy) |*proxy| {
- proxy.allocator.free(proxy.host);
- proxy.headers.deinit();
- }
-
if (!disable_tls)
client.ca_bundle.deinit(client.allocator);
client.* = undefined;
}
-/// Uses the *_proxy environment variable to set any unset proxies for the client.
-/// This function *must not* be called when the client has any active connections.
-pub fn loadDefaultProxies(client: *Client) !void {
+/// Populates `http_proxy` and `https_proxy` via standard proxy environment variables.
+/// Asserts the client has no active connections.
+/// Uses `arena` for a few small allocations that must outlive the client, or
+/// at least until those fields are set to different values.
+pub fn initDefaultProxies(client: *Client, arena: Allocator) !void {
// Prevent any new connections from being created.
client.connection_pool.mutex.lock();
defer client.connection_pool.mutex.unlock();
- assert(client.connection_pool.used.first == null); // There are still active requests.
-
- if (client.http_proxy == null) http: {
- const content: []const u8 = if (std.process.hasEnvVarConstant("http_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "http_proxy")
- else if (std.process.hasEnvVarConstant("HTTP_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "HTTP_PROXY")
- else if (std.process.hasEnvVarConstant("all_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "all_proxy")
- else if (std.process.hasEnvVarConstant("ALL_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "ALL_PROXY")
- else
- break :http;
- defer client.allocator.free(content);
-
- const uri = Uri.parse(content) catch
- Uri.parseWithoutScheme(content) catch
- break :http;
-
- const protocol = if (uri.scheme.len == 0)
- .plain // No scheme, assume http://
- else
- protocol_map.get(uri.scheme) orelse break :http; // Unknown scheme, ignore
-
- const host = if (uri.host) |host| try client.allocator.dupe(u8, host) else break :http; // Missing host, ignore
- client.http_proxy = .{
- .allocator = client.allocator,
- .headers = .{ .allocator = client.allocator },
-
- .protocol = protocol,
- .host = host,
- .port = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- },
- };
+ assert(client.connection_pool.used.first == null); // There are active requests.
- if (uri.user != null and uri.password != null) {
- const prefix = "Basic ";
+ if (client.http_proxy == null) {
+ client.http_proxy = try createProxyFromEnvVar(arena, &.{
+ "http_proxy", "HTTP_PROXY", "all_proxy", "ALL_PROXY",
+ });
+ }
- const unencoded = try std.fmt.allocPrint(client.allocator, "{s}:{s}", .{ uri.user.?, uri.password.? });
- defer client.allocator.free(unencoded);
+ if (client.https_proxy == null) {
+ client.https_proxy = try createProxyFromEnvVar(arena, &.{
+ "https_proxy", "HTTPS_PROXY", "all_proxy", "ALL_PROXY",
+ });
+ }
+}
- const buffer = try client.allocator.alloc(u8, std.base64.standard.Encoder.calcSize(unencoded.len) + prefix.len);
- defer client.allocator.free(buffer);
+fn createProxyFromEnvVar(arena: Allocator, env_var_names: []const []const u8) !?*Proxy {
+ const content = for (env_var_names) |name| {
+ break std.process.getEnvVarOwned(arena, name) catch |err| switch (err) {
+ error.EnvironmentVariableNotFound => continue,
+ else => |e| return e,
+ };
+ } else return null;
- const result = std.base64.standard.Encoder.encode(buffer[prefix.len..], unencoded);
- @memcpy(buffer[0..prefix.len], prefix);
+ const uri = Uri.parse(content) catch try Uri.parseAfterScheme("http", content);
+ const protocol, const valid_uri = validateUri(uri, arena) catch |err| switch (err) {
+ error.UnsupportedUriScheme => return null,
+ error.UriMissingHost => return error.HttpProxyMissingHost,
+ error.OutOfMemory => |e| return e,
+ };
- try client.http_proxy.?.headers.append("proxy-authorization", result);
- }
- }
+ const authorization: ?[]const u8 = if (valid_uri.user != null or valid_uri.password != null) a: {
+ const authorization = try arena.alloc(u8, basic_authorization.valueLengthFromUri(valid_uri));
+ assert(basic_authorization.value(valid_uri, authorization).len == authorization.len);
+ break :a authorization;
+ } else null;
- if (client.https_proxy == null) https: {
- const content: []const u8 = if (std.process.hasEnvVarConstant("https_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "https_proxy")
- else if (std.process.hasEnvVarConstant("HTTPS_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "HTTPS_PROXY")
- else if (std.process.hasEnvVarConstant("all_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "all_proxy")
- else if (std.process.hasEnvVarConstant("ALL_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "ALL_PROXY")
- else
- break :https;
- defer client.allocator.free(content);
-
- const uri = Uri.parse(content) catch
- Uri.parseWithoutScheme(content) catch
- break :https;
-
- const protocol = if (uri.scheme.len == 0)
- .plain // No scheme, assume http://
- else
- protocol_map.get(uri.scheme) orelse break :https; // Unknown scheme, ignore
-
- const host = if (uri.host) |host| try client.allocator.dupe(u8, host) else break :https; // Missing host, ignore
- client.https_proxy = .{
- .allocator = client.allocator,
- .headers = .{ .allocator = client.allocator },
-
- .protocol = protocol,
- .host = host,
- .port = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- },
- };
+ const proxy = try arena.create(Proxy);
+ proxy.* = .{
+ .protocol = protocol,
+ .host = valid_uri.host.?.raw,
+ .authorization = authorization,
+ .port = uriPort(valid_uri, protocol),
+ .supports_connect = true,
+ };
+ return proxy;
+}
- if (uri.user != null and uri.password != null) {
- const prefix = "Basic ";
+pub const basic_authorization = struct {
+ pub const max_user_len = 255;
+ pub const max_password_len = 255;
+ pub const max_value_len = valueLength(max_user_len, max_password_len);
- const unencoded = try std.fmt.allocPrint(client.allocator, "{s}:{s}", .{ uri.user.?, uri.password.? });
- defer client.allocator.free(unencoded);
+ const prefix = "Basic ";
- const buffer = try client.allocator.alloc(u8, std.base64.standard.Encoder.calcSize(unencoded.len) + prefix.len);
- defer client.allocator.free(buffer);
+ pub fn valueLength(user_len: usize, password_len: usize) usize {
+ return prefix.len + std.base64.standard.Encoder.calcSize(user_len + 1 + password_len);
+ }
- const result = std.base64.standard.Encoder.encode(buffer[prefix.len..], unencoded);
- @memcpy(buffer[0..prefix.len], prefix);
+ pub fn valueLengthFromUri(uri: Uri) usize {
+ var stream = std.io.countingWriter(std.io.null_writer);
+ try stream.writer().print("{user}", .{uri.user orelse Uri.Component.empty});
+ const user_len = stream.bytes_written;
+ stream.bytes_written = 0;
+ try stream.writer().print("{password}", .{uri.password orelse Uri.Component.empty});
+ const password_len = stream.bytes_written;
+ return valueLength(@intCast(user_len), @intCast(password_len));
+ }
- try client.https_proxy.?.headers.append("proxy-authorization", result);
- }
+ pub fn value(uri: Uri, out: []u8) []u8 {
+ var buf: [max_user_len + ":".len + max_password_len]u8 = undefined;
+ var stream = std.io.fixedBufferStream(&buf);
+ stream.writer().print("{user}", .{uri.user orelse Uri.Component.empty}) catch
+ unreachable;
+ assert(stream.pos <= max_user_len);
+ stream.writer().print(":{password}", .{uri.password orelse Uri.Component.empty}) catch
+ unreachable;
+
+ @memcpy(out[0..prefix.len], prefix);
+ const base64 = std.base64.standard.Encoder.encode(out[prefix.len..], stream.getWritten());
+ return out[0 .. prefix.len + base64.len];
}
-}
+};
pub const ConnectTcpError = Allocator.Error || error{ ConnectionRefused, NetworkUnreachable, ConnectionTimedOut, ConnectionResetByPeer, TemporaryNameServerFailure, NameServerFailure, UnknownHostName, HostLacksNetworkAddresses, UnexpectedConnectFailure, TlsInitializationFailed };
@@ -1228,8 +1341,7 @@ pub fn connectTcp(client: *Client, host: []const u8, port: u16, protocol: Connec
.host = host,
.port = port,
.protocol = protocol,
- })) |node|
- return node;
+ })) |node| return node;
if (disable_tls and protocol == .tls)
return error.TlsInitializationFailed;
@@ -1278,7 +1390,8 @@ pub fn connectTcp(client: *Client, host: []const u8, port: u16, protocol: Connec
return &conn.data;
}
-/// Connect to `tunnel_host:tunnel_port` using the specified proxy with HTTP CONNECT. This will reuse a connection if one is already open.
+/// Connect to `tunnel_host:tunnel_port` using the specified proxy with HTTP
+/// CONNECT. This will reuse a connection if one is already open.
///
/// This function is threadsafe.
pub fn connectTunnel(
@@ -1304,31 +1417,22 @@ pub fn connectTunnel(
client.connection_pool.release(client.allocator, conn);
}
- const uri = Uri{
+ var buffer: [8096]u8 = undefined;
+ var req = client.open(.CONNECT, .{
.scheme = "http",
- .user = null,
- .password = null,
- .host = tunnel_host,
+ .host = .{ .raw = tunnel_host },
.port = tunnel_port,
- .path = "",
- .query = null,
- .fragment = null,
- };
-
- // we can use a small buffer here because a CONNECT response should be very small
- var buffer: [8096]u8 = undefined;
-
- var req = client.open(.CONNECT, uri, proxy.headers, .{
- .handle_redirects = false,
+ }, .{
+ .redirect_behavior = .unhandled,
.connection = conn,
- .header_strategy = .{ .static = &buffer },
+ .server_header_buffer = &buffer,
}) catch |err| {
std.log.debug("err {}", .{err});
break :tunnel err;
};
defer req.deinit();
- req.send(.{ .raw_uri = true }) catch |err| break :tunnel err;
+ req.send() catch |err| break :tunnel err;
req.wait() catch |err| break :tunnel err;
if (req.response.status.class() == .server_error) {
@@ -1357,49 +1461,55 @@ pub fn connectTunnel(
}
// Prevents a dependency loop in open()
-const ConnectErrorPartial = ConnectTcpError || error{ UnsupportedUrlScheme, ConnectionRefused };
+const ConnectErrorPartial = ConnectTcpError || error{ UnsupportedUriScheme, ConnectionRefused };
pub const ConnectError = ConnectErrorPartial || RequestError;
-/// Connect to `host:port` using the specified protocol. This will reuse a connection if one is already open.
-/// If a proxy is configured for the client, then the proxy will be used to connect to the host.
+/// Connect to `host:port` using the specified protocol. This will reuse a
+/// connection if one is already open.
+/// If a proxy is configured for the client, then the proxy will be used to
+/// connect to the host.
///
/// This function is threadsafe.
-pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*Connection {
- // pointer required so that `supports_connect` can be updated if a CONNECT fails
- const potential_proxy: ?*Proxy = switch (protocol) {
- .plain => if (client.http_proxy) |*proxy_info| proxy_info else null,
- .tls => if (client.https_proxy) |*proxy_info| proxy_info else null,
- };
-
- if (potential_proxy) |proxy| {
- // don't attempt to proxy the proxy thru itself.
- if (std.mem.eql(u8, proxy.host, host) and proxy.port == port and proxy.protocol == protocol) {
- return client.connectTcp(host, port, protocol);
- }
-
- if (proxy.supports_connect) tunnel: {
- return connectTunnel(client, proxy, host, port) catch |err| switch (err) {
- error.TunnelNotSupported => break :tunnel,
- else => |e| return e,
- };
- }
+pub fn connect(
+ client: *Client,
+ host: []const u8,
+ port: u16,
+ protocol: Connection.Protocol,
+) ConnectError!*Connection {
+ const proxy = switch (protocol) {
+ .plain => client.http_proxy,
+ .tls => client.https_proxy,
+ } orelse return client.connectTcp(host, port, protocol);
+
+ // Prevent proxying through itself.
+ if (std.ascii.eqlIgnoreCase(proxy.host, host) and
+ proxy.port == port and proxy.protocol == protocol)
+ {
+ return client.connectTcp(host, port, protocol);
+ }
- // fall back to using the proxy as a normal http proxy
- const conn = try client.connectTcp(proxy.host, proxy.port, proxy.protocol);
- errdefer {
- conn.closing = true;
- client.connection_pool.release(conn);
- }
+ if (proxy.supports_connect) tunnel: {
+ return connectTunnel(client, proxy, host, port) catch |err| switch (err) {
+ error.TunnelNotSupported => break :tunnel,
+ else => |e| return e,
+ };
+ }
- conn.proxied = true;
- return conn;
+ // fall back to using the proxy as a normal http proxy
+ const conn = try client.connectTcp(proxy.host, proxy.port, proxy.protocol);
+ errdefer {
+ conn.closing = true;
+ client.connection_pool.release(conn);
}
- return client.connectTcp(host, port, protocol);
+ conn.proxied = true;
+ return conn;
}
-pub const RequestError = ConnectTcpError || ConnectErrorPartial || Request.SendError || std.fmt.ParseIntError || Connection.WriteError || error{
- UnsupportedUrlScheme,
+pub const RequestError = ConnectTcpError || ConnectErrorPartial || Request.SendError ||
+ std.fmt.ParseIntError || Connection.WriteError ||
+ error{ // TODO: file a zig fmt issue for this bad indentation
+ UnsupportedUriScheme,
UriMissingHost,
CertificateBundleLoadFailure,
@@ -1409,237 +1519,246 @@ pub const RequestError = ConnectTcpError || ConnectErrorPartial || Request.SendE
pub const RequestOptions = struct {
version: http.Version = .@"HTTP/1.1",
- /// Automatically ignore 100 Continue responses. This assumes you don't care, and will have sent the body before you
- /// wait for the response.
+ /// Automatically ignore 100 Continue responses. This assumes you don't
+ /// care, and will have sent the body before you wait for the response.
///
- /// If this is not the case AND you know the server will send a 100 Continue, set this to false and wait for a
- /// response before sending the body. If you wait AND the server does not send a 100 Continue before you finish the
- /// request, then the request *will* deadlock.
+ /// If this is not the case AND you know the server will send a 100
+ /// Continue, set this to false and wait for a response before sending the
+ /// body. If you wait AND the server does not send a 100 Continue before
+ /// you finish the request, then the request *will* deadlock.
handle_continue: bool = true,
- /// Automatically follow redirects. This will only follow redirects for repeatable requests (ie. with no payload or the server has acknowledged the payload)
- handle_redirects: bool = true,
+ /// If false, close the connection after the one request. If true,
+ /// participate in the client connection pool.
+ keep_alive: bool = true,
- /// How many redirects to follow before returning an error.
- max_redirects: u32 = 3,
- header_strategy: StorageStrategy = .{ .dynamic = 16 * 1024 },
+ /// This field specifies whether to automatically follow redirects, and if
+ /// so, how many redirects to follow before returning an error.
+ ///
+ /// This will only follow redirects for repeatable requests (ie. with no
+ /// payload or the server has acknowledged the payload).
+ redirect_behavior: Request.RedirectBehavior = @enumFromInt(3),
+
+ /// Externally-owned memory used to store the server's entire HTTP header.
+ /// `error.HttpHeadersOversize` is returned from read() when a
+ /// client sends too many bytes of HTTP headers.
+ server_header_buffer: []u8,
/// Must be an already acquired connection.
connection: ?*Connection = null,
- pub const StorageStrategy = union(enum) {
- /// In this case, the client's Allocator will be used to store the
- /// entire HTTP header. This value is the maximum total size of
- /// HTTP headers allowed, otherwise
- /// error.HttpHeadersExceededSizeLimit is returned from read().
- dynamic: usize,
- /// This is used to store the entire HTTP header. If the HTTP
- /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
- /// is returned from read(). When this is used, `error.OutOfMemory`
- /// cannot be returned from `read()`.
- static: []u8,
- };
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Request.Headers = .{},
+ /// These headers are kept including when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header = &.{},
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header = &.{},
};
-pub const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
- .{ "http", .plain },
- .{ "ws", .plain },
- .{ "https", .tls },
- .{ "wss", .tls },
-});
+fn validateUri(uri: Uri, arena: Allocator) !struct { Connection.Protocol, Uri } {
+ const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
+ .{ "http", .plain },
+ .{ "ws", .plain },
+ .{ "https", .tls },
+ .{ "wss", .tls },
+ });
+ const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUriScheme;
+ var valid_uri = uri;
+ // The host is always going to be needed as a raw string for hostname resolution anyway.
+ valid_uri.host = .{
+ .raw = try (uri.host orelse return error.UriMissingHost).toRawMaybeAlloc(arena),
+ };
+ return .{ protocol, valid_uri };
+}
+
+fn uriPort(uri: Uri, protocol: Connection.Protocol) u16 {
+ return uri.port orelse switch (protocol) {
+ .plain => 80,
+ .tls => 443,
+ };
+}
/// Open a connection to the host specified by `uri` and prepare to send a HTTP request.
///
/// `uri` must remain alive during the entire request.
-/// `headers` is cloned and may be freed after this function returns.
///
/// The caller is responsible for calling `deinit()` on the `Request`.
/// This function is threadsafe.
-pub fn open(client: *Client, method: http.Method, uri: Uri, headers: http.Headers, options: RequestOptions) RequestError!Request {
- const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUrlScheme;
-
- const port: u16 = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- };
+///
+/// Asserts that "\r\n" does not occur in any header name or value.
+pub fn open(
+ client: *Client,
+ method: http.Method,
+ uri: Uri,
+ options: RequestOptions,
+) RequestError!Request {
+ if (std.debug.runtime_safety) {
+ for (options.extra_headers) |header| {
+ assert(header.name.len != 0);
+ assert(std.mem.indexOfScalar(u8, header.name, ':') == null);
+ assert(std.mem.indexOfPosLinear(u8, header.name, 0, "\r\n") == null);
+ assert(std.mem.indexOfPosLinear(u8, header.value, 0, "\r\n") == null);
+ }
+ for (options.privileged_headers) |header| {
+ assert(header.name.len != 0);
+ assert(std.mem.indexOfPosLinear(u8, header.name, 0, "\r\n") == null);
+ assert(std.mem.indexOfPosLinear(u8, header.value, 0, "\r\n") == null);
+ }
+ }
- const host = uri.host orelse return error.UriMissingHost;
+ var server_header = std.heap.FixedBufferAllocator.init(options.server_header_buffer);
+ const protocol, const valid_uri = try validateUri(uri, server_header.allocator());
- if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .Acquire)) {
+ if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .acquire)) {
if (disable_tls) unreachable;
client.ca_bundle_mutex.lock();
defer client.ca_bundle_mutex.unlock();
if (client.next_https_rescan_certs) {
- client.ca_bundle.rescan(client.allocator) catch return error.CertificateBundleLoadFailure;
- @atomicStore(bool, &client.next_https_rescan_certs, false, .Release);
+ client.ca_bundle.rescan(client.allocator) catch
+ return error.CertificateBundleLoadFailure;
+ @atomicStore(bool, &client.next_https_rescan_certs, false, .release);
}
}
- const conn = options.connection orelse try client.connect(host, port, protocol);
+ const conn = options.connection orelse
+ try client.connect(valid_uri.host.?.raw, uriPort(valid_uri, protocol), protocol);
var req: Request = .{
- .uri = uri,
+ .uri = valid_uri,
.client = client,
.connection = conn,
- .headers = try headers.clone(client.allocator), // Headers must be cloned to properly handle header transformations in redirects.
+ .keep_alive = options.keep_alive,
.method = method,
.version = options.version,
- .redirects_left = options.max_redirects,
- .handle_redirects = options.handle_redirects,
+ .transfer_encoding = .none,
+ .redirect_behavior = options.redirect_behavior,
.handle_continue = options.handle_continue,
.response = .{
+ .version = undefined,
.status = undefined,
.reason = undefined,
- .version = undefined,
- .headers = http.Headers{ .allocator = client.allocator, .owned = false },
- .parser = switch (options.header_strategy) {
- .dynamic => |max| proto.HeadersParser.initDynamic(max),
- .static => |buf| proto.HeadersParser.initStatic(buf),
- },
+ .keep_alive = undefined,
+ .parser = proto.HeadersParser.init(server_header.buffer[server_header.end_index..]),
},
- .arena = undefined,
+ .headers = options.headers,
+ .extra_headers = options.extra_headers,
+ .privileged_headers = options.privileged_headers,
};
errdefer req.deinit();
- req.arena = std.heap.ArenaAllocator.init(client.allocator);
-
return req;
}
pub const FetchOptions = struct {
+ server_header_buffer: ?[]u8 = null,
+ redirect_behavior: ?Request.RedirectBehavior = null,
+
+ /// If the server sends a body, it will be appended to this ArrayList.
+ /// `max_append_size` provides an upper limit for how much they can grow.
+ response_storage: ResponseStorage = .ignore,
+ max_append_size: ?usize = null,
+
+ location: Location,
+ method: ?http.Method = null,
+ payload: ?[]const u8 = null,
+ raw_uri: bool = false,
+ keep_alive: bool = true,
+
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Request.Headers = .{},
+ /// These headers are kept including when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header = &.{},
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header = &.{},
+
pub const Location = union(enum) {
url: []const u8,
uri: Uri,
};
- pub const Payload = union(enum) {
- string: []const u8,
- file: std.fs.File,
- none,
+ pub const ResponseStorage = union(enum) {
+ ignore,
+ /// Only the existing capacity will be used.
+ static: *std.ArrayListUnmanaged(u8),
+ dynamic: *std.ArrayList(u8),
};
-
- pub const ResponseStrategy = union(enum) {
- storage: RequestOptions.StorageStrategy,
- file: std.fs.File,
- none,
- };
-
- header_strategy: RequestOptions.StorageStrategy = .{ .dynamic = 16 * 1024 },
- response_strategy: ResponseStrategy = .{ .storage = .{ .dynamic = 16 * 1024 * 1024 } },
-
- location: Location,
- method: http.Method = .GET,
- headers: http.Headers = http.Headers{ .allocator = std.heap.page_allocator, .owned = false },
- payload: Payload = .none,
- raw_uri: bool = false,
};
pub const FetchResult = struct {
status: http.Status,
- body: ?[]const u8 = null,
- headers: http.Headers,
-
- allocator: Allocator,
- options: FetchOptions,
-
- pub fn deinit(res: *FetchResult) void {
- if (res.options.response_strategy == .storage and res.options.response_strategy.storage == .dynamic) {
- if (res.body) |body| res.allocator.free(body);
- }
-
- res.headers.deinit();
- }
};
/// Perform a one-shot HTTP request with the provided options.
///
/// This function is threadsafe.
-pub fn fetch(client: *Client, allocator: Allocator, options: FetchOptions) !FetchResult {
- const has_transfer_encoding = options.headers.contains("transfer-encoding");
- const has_content_length = options.headers.contains("content-length");
-
- if (has_content_length or has_transfer_encoding) return error.UnsupportedHeader;
-
+pub fn fetch(client: *Client, options: FetchOptions) !FetchResult {
const uri = switch (options.location) {
.url => |u| try Uri.parse(u),
.uri => |u| u,
};
-
- var req = try open(client, options.method, uri, options.headers, .{
- .header_strategy = options.header_strategy,
- .handle_redirects = options.payload == .none,
+ var server_header_buffer: [16 * 1024]u8 = undefined;
+
+ const method: http.Method = options.method orelse
+ if (options.payload != null) .POST else .GET;
+
+ var req = try open(client, method, uri, .{
+ .server_header_buffer = options.server_header_buffer orelse &server_header_buffer,
+ .redirect_behavior = options.redirect_behavior orelse
+ if (options.payload == null) @enumFromInt(3) else .unhandled,
+ .headers = options.headers,
+ .extra_headers = options.extra_headers,
+ .privileged_headers = options.privileged_headers,
+ .keep_alive = options.keep_alive,
});
defer req.deinit();
- { // Block to maintain lock of file to attempt to prevent a race condition where another process modifies the file while we are reading it.
- // This relies on other processes actually obeying the advisory lock, which is not guaranteed.
- if (options.payload == .file) try options.payload.file.lock(.shared);
- defer if (options.payload == .file) options.payload.file.unlock();
+ if (options.payload) |payload| req.transfer_encoding = .{ .content_length = payload.len };
- switch (options.payload) {
- .string => |str| req.transfer_encoding = .{ .content_length = str.len },
- .file => |file| req.transfer_encoding = .{ .content_length = (try file.stat()).size },
- .none => {},
- }
+ try req.send();
- try req.send(.{ .raw_uri = options.raw_uri });
-
- switch (options.payload) {
- .string => |str| try req.writeAll(str),
- .file => |file| {
- try file.seekTo(0);
- var fifo = std.fifo.LinearFifo(u8, .{ .Static = 8192 }).init();
- try fifo.pump(file.reader(), req.writer());
- },
- .none => {},
- }
-
- try req.finish();
- }
+ if (options.payload) |payload| try req.writeAll(payload);
+ try req.finish();
try req.wait();
- var res = FetchResult{
- .status = req.response.status,
- .headers = try req.response.headers.clone(allocator),
-
- .allocator = allocator,
- .options = options,
- };
-
- switch (options.response_strategy) {
- .storage => |storage| switch (storage) {
- .dynamic => |max| res.body = try req.reader().readAllAlloc(allocator, max),
- .static => |buf| res.body = buf[0..try req.reader().readAll(buf)],
+ switch (options.response_storage) {
+ .ignore => {
+ // Take advantage of request internals to discard the response body
+ // and make the connection available for another request.
+ req.response.skip = true;
+ assert(try req.transferRead(&.{}) == 0); // No buffer is necessary when skipping.
},
- .file => |file| {
- var fifo = std.fifo.LinearFifo(u8, .{ .Static = 8192 }).init();
- try fifo.pump(req.reader(), file.writer());
+ .dynamic => |list| {
+ const max_append_size = options.max_append_size orelse 2 * 1024 * 1024;
+ try req.reader().readAllArrayList(list, max_append_size);
},
- .none => { // Take advantage of request internals to discard the response body and make the connection available for another request.
- req.response.skip = true;
-
- const empty = @as([*]u8, undefined)[0..0];
- assert(try req.transferRead(empty) == 0); // we're skipping, no buffer is necessary
+ .static => |list| {
+ const buf = b: {
+ const buf = list.unusedCapacitySlice();
+ if (options.max_append_size) |len| {
+ if (len < buf.len) break :b buf[0..len];
+ }
+ break :b buf;
+ };
+ list.items.len += try req.reader().readAll(buf);
},
}
- return res;
+ return .{
+ .status = req.response.status,
+ };
}
test {
- const native_endian = comptime builtin.cpu.arch.endian();
- if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
- // https://github.com/ziglang/zig/issues/13782
- return error.SkipZigTest;
- }
-
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest;
-
- std.testing.refAllDecls(@This());
+ _ = &initDefaultProxies;
}
diff --git a/src/async/stream.zig b/src/async/stream.zig
index 54145f32f..9ef73d018 100644
--- a/src/async/stream.zig
+++ b/src/async/stream.zig
@@ -18,7 +18,7 @@
const std = @import("std");
const builtin = @import("builtin");
-const os = std.os;
+const posix = std.posix;
const io = std.io;
const assert = std.debug.assert;
@@ -28,15 +28,15 @@ pub const Stream = struct {
alloc: std.mem.Allocator,
conn: *tcp.Conn,
- handle: std.os.socket_t,
+ handle: posix.socket_t,
pub fn close(self: Stream) void {
- os.closeSocket(self.handle);
+ posix.close(self.handle);
self.alloc.destroy(self.conn);
}
- pub const ReadError = os.ReadError;
- pub const WriteError = os.WriteError;
+ pub const ReadError = posix.ReadError;
+ pub const WriteError = posix.WriteError;
pub const Reader = io.Reader(Stream, ReadError, read);
pub const Writer = io.Writer(Stream, WriteError, write);
@@ -55,8 +55,8 @@ pub const Stream = struct {
};
}
- pub fn readv(s: Stream, iovecs: []const os.iovec) ReadError!usize {
- return os.readv(s.handle, iovecs);
+ pub fn readv(s: Stream, iovecs: []const posix.iovec) ReadError!usize {
+ return posix.readv(s.handle, iovecs);
}
/// Returns the number of bytes read. If the number read is smaller than
@@ -105,7 +105,7 @@ pub const Stream = struct {
/// See https://github.com/ziglang/zig/issues/7699
/// See equivalent function: `std.fs.File.writev`.
- pub fn writev(self: Stream, iovecs: []const os.iovec_const) WriteError!usize {
+ pub fn writev(self: Stream, iovecs: []const posix.iovec_const) WriteError!usize {
if (iovecs.len == 0) return 0;
const first_buffer = iovecs[0].iov_base[0..iovecs[0].iov_len];
return try self.write(first_buffer);
@@ -115,7 +115,7 @@ pub const Stream = struct {
/// order to handle partial writes from the underlying OS layer.
/// See https://github.com/ziglang/zig/issues/7699
/// See equivalent function: `std.fs.File.writevAll`.
- pub fn writevAll(self: Stream, iovecs: []os.iovec_const) WriteError!void {
+ pub fn writevAll(self: Stream, iovecs: []posix.iovec_const) WriteError!void {
if (iovecs.len == 0) return;
var i: usize = 0;
diff --git a/src/async/tcp.zig b/src/async/tcp.zig
index c50c8a4be..61a495480 100644
--- a/src/async/tcp.zig
+++ b/src/async/tcp.zig
@@ -59,19 +59,19 @@ pub const Conn = struct {
loop: *Loop,
- pub fn connect(self: *Conn, socket: std.os.socket_t, address: std.net.Address) !void {
+ pub fn connect(self: *Conn, socket: std.posix.socket_t, address: std.net.Address) !void {
var cmd = Command{ .impl = NetworkImpl.init(self.loop) };
cmd.impl.connect(&cmd, socket, address);
_ = try cmd.wait();
}
- pub fn send(self: *Conn, socket: std.os.socket_t, buffer: []const u8) !usize {
+ pub fn send(self: *Conn, socket: std.posix.socket_t, buffer: []const u8) !usize {
var cmd = Command{ .impl = NetworkImpl.init(self.loop) };
cmd.impl.send(&cmd, socket, buffer);
return try cmd.wait();
}
- pub fn receive(self: *Conn, socket: std.os.socket_t, buffer: []u8) !usize {
+ pub fn receive(self: *Conn, socket: std.posix.socket_t, buffer: []u8) !usize {
var cmd = Command{ .impl = NetworkImpl.init(self.loop) };
cmd.impl.receive(&cmd, socket, buffer);
return try cmd.wait();
@@ -93,12 +93,12 @@ pub fn tcpConnectToHost(alloc: std.mem.Allocator, loop: *Loop, name: []const u8,
else => return err,
};
}
- return std.os.ConnectError.ConnectionRefused;
+ return std.posix.ConnectError.ConnectionRefused;
}
pub fn tcpConnectToAddress(alloc: std.mem.Allocator, loop: *Loop, addr: net.Address) !Stream {
- const sockfd = try std.os.socket(addr.any.family, std.os.SOCK.STREAM, std.os.IPPROTO.TCP);
- errdefer std.os.closeSocket(sockfd);
+ const sockfd = try std.posix.socket(addr.any.family, std.posix.SOCK.STREAM, std.posix.IPPROTO.TCP);
+ errdefer std.posix.close(sockfd);
var conn = try alloc.create(Conn);
conn.* = Conn{ .loop = loop };
diff --git a/src/async/test.zig b/src/async/test.zig
index 6ec1cef4e..27f86c6a1 100644
--- a/src/async/test.zig
+++ b/src/async/test.zig
@@ -40,11 +40,9 @@ test "blocking mode fetch API" {
// force client's CA cert scan from system.
try client.ca_bundle.rescan(client.allocator);
- var res = try client.fetch(alloc, .{
+ const res = try client.fetch(.{
.location = .{ .uri = try std.Uri.parse(url) },
- .payload = .none,
});
- defer res.deinit();
try std.testing.expect(res.status == .ok);
}
@@ -64,13 +62,13 @@ test "blocking mode open/send/wait API" {
// force client's CA cert scan from system.
try client.ca_bundle.rescan(client.allocator);
- var headers = try std.http.Headers.initList(alloc, &[_]std.http.Field{});
- defer headers.deinit();
-
- var req = try client.open(.GET, try std.Uri.parse(url), headers, .{});
+ var buf: [2014]u8 = undefined;
+ var req = try client.open(.GET, try std.Uri.parse(url), .{
+ .server_header_buffer = &buf,
+ });
defer req.deinit();
- try req.send(.{});
+ try req.send();
try req.finish();
try req.wait();
@@ -87,7 +85,6 @@ const AsyncClient = struct {
cli: *Client,
uri: std.Uri,
- headers: std.http.Headers,
req: ?Request = undefined,
state: State = .new,
@@ -95,9 +92,10 @@ const AsyncClient = struct {
impl: YieldImpl,
err: ?anyerror = null,
+ buf: [2014]u8 = undefined,
+
pub fn deinit(self: *AsyncRequest) void {
if (self.req) |*r| r.deinit();
- self.headers.deinit();
}
pub fn fetch(self: *AsyncRequest) void {
@@ -116,11 +114,13 @@ const AsyncClient = struct {
switch (self.state) {
.new => {
self.state = .open;
- self.req = self.cli.open(.GET, self.uri, self.headers, .{}) catch |e| return self.onerr(e);
+ self.req = self.cli.open(.GET, self.uri, .{
+ .server_header_buffer = &self.buf,
+ }) catch |e| return self.onerr(e);
},
.open => {
self.state = .send;
- self.req.?.send(.{}) catch |e| return self.onerr(e);
+ self.req.?.send() catch |e| return self.onerr(e);
},
.send => {
self.state = .finish;
@@ -164,7 +164,6 @@ const AsyncClient = struct {
.impl = YieldImpl.init(self.cli.loop),
.cli = &self.cli,
.uri = uri,
- .headers = .{ .allocator = self.cli.allocator, .owned = false },
};
}
};
diff --git a/src/browser/browser.zig b/src/browser/browser.zig
index 0157e352c..c437b53e2 100644
--- a/src/browser/browser.zig
+++ b/src/browser/browser.zig
@@ -21,7 +21,7 @@ const builtin = @import("builtin");
const Types = @import("root").Types;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Loader = @import("loader.zig").Loader;
const Dump = @import("dump.zig");
const Mime = @import("mime.zig");
@@ -224,7 +224,7 @@ pub const Page = struct {
// own the url
if (self.rawuri) |prev| alloc.free(prev);
self.rawuri = try alloc.dupe(u8, uri);
- self.uri = std.Uri.parse(self.rawuri.?) catch try std.Uri.parseWithoutScheme(self.rawuri.?);
+ self.uri = std.Uri.parse(self.rawuri.?) catch try std.Uri.parseAfterScheme("", self.rawuri.?);
// prepare origin value.
var buf = std.ArrayList(u8).init(alloc);
@@ -247,29 +247,39 @@ pub const Page = struct {
// TODO handle redirection
if (req.response.status != .ok) {
- log.debug("{?} {d} {s}\n{any}", .{
+ log.debug("{?} {d} {s}", .{
req.response.version,
req.response.status,
req.response.reason,
- req.response.headers,
+ // TODO log headers
});
return error.BadStatusCode;
}
// TODO handle charset
// https://html.spec.whatwg.org/#content-type
- const ct = req.response.headers.getFirstValue("Content-Type") orelse {
+ var it = req.response.iterateHeaders();
+ var ct: ?[]const u8 = null;
+ while (true) {
+ const h = it.next() orelse break;
+ if (std.ascii.eqlIgnoreCase(h.name, "Content-Type")) {
+ ct = try alloc.dupe(u8, h.value);
+ }
+ }
+ if (ct == null) {
// no content type in HTTP headers.
// TODO try to sniff mime type from the body.
log.info("no content-type HTTP header", .{});
return;
- };
- log.debug("header content-type: {s}", .{ct});
- const mime = try Mime.parse(ct);
+ }
+ defer alloc.free(ct.?);
+
+ log.debug("header content-type: {s}", .{ct.?});
+ const mime = try Mime.parse(ct.?);
if (mime.eql(Mime.HTML)) {
try self.loadHTMLDoc(req.reader(), mime.charset orelse "utf-8");
} else {
- log.info("non-HTML document: {s}", .{ct});
+ log.info("non-HTML document: {s}", .{ct.?});
// save the body into the page.
self.raw_data = try req.reader().readAllAlloc(alloc, 16 * 1024 * 1024);
@@ -500,22 +510,27 @@ pub const Page = struct {
log.debug("starting fetch script {s}", .{src});
- const u = std.Uri.parse(src) catch try std.Uri.parseWithoutScheme(src);
- const ru = try std.Uri.resolve(self.uri, u, false, alloc);
+ var buffer: [1024]u8 = undefined;
+ var b: []u8 = buffer[0..];
+ const u = try std.Uri.resolve_inplace(self.uri, src, &b);
- var fetchres = try self.session.loader.fetch(alloc, ru);
+ var fetchres = try self.session.loader.get(alloc, u);
defer fetchres.deinit();
- log.info("fech script {any}: {d}", .{ ru, fetchres.status });
+ const resp = fetchres.req.response;
+
+ log.info("fech script {any}: {d}", .{ u, resp.status });
- if (fetchres.status != .ok) return FetchError.BadStatusCode;
+ if (resp.status != .ok) return FetchError.BadStatusCode;
// TODO check content-type
+ const body = try fetchres.req.reader().readAllAlloc(alloc, 16 * 1024 * 1024);
+ defer alloc.free(body);
// check no body
- if (fetchres.body == null) return FetchError.NoBody;
+ if (body.len == 0) return FetchError.NoBody;
- var res = try self.session.env.execTryCatch(alloc, fetchres.body.?, src);
+ var res = try self.session.env.execTryCatch(alloc, body, src);
defer res.deinit(alloc);
if (res.success) {
diff --git a/src/browser/dump.zig b/src/browser/dump.zig
index 44741525e..7b705d1bf 100644
--- a/src/browser/dump.zig
+++ b/src/browser/dump.zig
@@ -19,7 +19,7 @@
const std = @import("std");
const File = std.fs.File;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Walker = @import("../dom/walker.zig").WalkerChildren;
// writer must be a std.io.Writer
diff --git a/src/browser/loader.zig b/src/browser/loader.zig
index 400072588..c476f8150 100644
--- a/src/browser/loader.zig
+++ b/src/browser/loader.zig
@@ -22,6 +22,7 @@ const user_agent = "Lightpanda.io/1.0";
pub const Loader = struct {
client: std.http.Client,
+ server_header_buffer: [1024]u8 = undefined,
pub const Response = struct {
alloc: std.mem.Allocator,
@@ -45,46 +46,30 @@ pub const Loader = struct {
self.client.deinit();
}
- // the caller must deinit the FetchResult.
- pub fn fetch(self: *Loader, alloc: std.mem.Allocator, uri: std.Uri) !std.http.Client.FetchResult {
- var headers = try std.http.Headers.initList(alloc, &[_]std.http.Field{
- .{ .name = "User-Agent", .value = user_agent },
- .{ .name = "Accept", .value = "*/*" },
- .{ .name = "Accept-Language", .value = "en-US,en;q=0.5" },
- });
- defer headers.deinit();
-
- return try self.client.fetch(alloc, .{
- .location = .{ .uri = uri },
- .headers = headers,
- .payload = .none,
- });
- }
-
// see
// https://ziglang.org/documentation/master/std/#A;std:http.Client.fetch
// for reference.
// The caller is responsible for calling `deinit()` on the `Response`.
pub fn get(self: *Loader, alloc: std.mem.Allocator, uri: std.Uri) !Response {
- var headers = try std.http.Headers.initList(alloc, &[_]std.http.Field{
- .{ .name = "User-Agent", .value = user_agent },
- .{ .name = "Accept", .value = "*/*" },
- .{ .name = "Accept-Language", .value = "en-US,en;q=0.5" },
- });
- defer headers.deinit();
-
var resp = Response{
.alloc = alloc,
.req = try alloc.create(std.http.Client.Request),
};
errdefer alloc.destroy(resp.req);
- resp.req.* = try self.client.open(.GET, uri, headers, .{
- .handle_redirects = true, // TODO handle redirects manually
+ resp.req.* = try self.client.open(.GET, uri, .{
+ .headers = .{
+ .user_agent = .{ .override = user_agent },
+ },
+ .extra_headers = &.{
+ .{ .name = "Accept", .value = "*/*" },
+ .{ .name = "Accept-Language", .value = "en-US,en;q=0.5" },
+ },
+ .server_header_buffer = &self.server_header_buffer,
});
errdefer resp.req.deinit();
- try resp.req.send(.{});
+ try resp.req.send();
try resp.req.finish();
try resp.req.wait();
@@ -92,13 +77,13 @@ pub const Loader = struct {
}
};
-test "basic url fetch" {
+test "basic url get" {
const alloc = std.testing.allocator;
var loader = Loader.init(alloc);
defer loader.deinit();
- var result = try loader.fetch(alloc, "https://en.wikipedia.org/wiki/Main_Page");
+ var result = try loader.get(alloc, "https://en.wikipedia.org/wiki/Main_Page");
defer result.deinit();
- try std.testing.expect(result.status == std.http.Status.ok);
+ try std.testing.expect(result.req.response.status == std.http.Status.ok);
}
diff --git a/src/css/libdom.zig b/src/css/libdom.zig
index 44307c638..213337260 100644
--- a/src/css/libdom.zig
+++ b/src/css/libdom.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
// Node implementation with Netsurf Libdom C lib.
pub const Node = struct {
diff --git a/src/css/libdom_test.zig b/src/css/libdom_test.zig
index 4cd267e03..c0cdbb3f1 100644
--- a/src/css/libdom_test.zig
+++ b/src/css/libdom_test.zig
@@ -19,7 +19,7 @@
const std = @import("std");
const css = @import("css.zig");
const Node = @import("libdom.zig").Node;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Matcher = struct {
const Nodes = std.ArrayList(Node);
diff --git a/src/dom/attribute.zig b/src/dom/attribute.zig
index a735171c9..d85f87a09 100644
--- a/src/dom/attribute.zig
+++ b/src/dom/attribute.zig
@@ -22,7 +22,7 @@ const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Node = @import("node.zig").Node;
const DOMException = @import("exceptions.zig").DOMException;
diff --git a/src/dom/cdata_section.zig b/src/dom/cdata_section.zig
index a17e8317b..c8ff61071 100644
--- a/src/dom/cdata_section.zig
+++ b/src/dom/cdata_section.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Text = @import("text.zig").Text;
diff --git a/src/dom/character_data.zig b/src/dom/character_data.zig
index 8ba6e2663..b195b921f 100644
--- a/src/dom/character_data.zig
+++ b/src/dom/character_data.zig
@@ -23,7 +23,7 @@ const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
const generate = @import("../generate.zig");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Node = @import("node.zig").Node;
const Comment = @import("comment.zig").Comment;
diff --git a/src/dom/comment.zig b/src/dom/comment.zig
index e82c51bc3..fe4111bc2 100644
--- a/src/dom/comment.zig
+++ b/src/dom/comment.zig
@@ -17,7 +17,7 @@
// along with this program. If not, see .
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/css.zig b/src/dom/css.zig
index 50c262e4b..0432e83ef 100644
--- a/src/dom/css.zig
+++ b/src/dom/css.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const css = @import("../css/css.zig");
const Node = @import("../css/libdom.zig").Node;
diff --git a/src/dom/document.zig b/src/dom/document.zig
index 0741b1d91..d58fcaedf 100644
--- a/src/dom/document.zig
+++ b/src/dom/document.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
@@ -449,7 +449,7 @@ pub fn testExecFn(
try checkCases(js_env, &adoptNode);
const tags = comptime parser.Tag.all();
- comptime var createElements: [(tags.len) * 2]Case = undefined;
+ var createElements: [(tags.len) * 2]Case = undefined;
inline for (tags, 0..) |tag, i| {
const tag_name = @tagName(tag);
createElements[i * 2] = Case{
diff --git a/src/dom/document_fragment.zig b/src/dom/document_fragment.zig
index 08d99165f..574e8eb12 100644
--- a/src/dom/document_fragment.zig
+++ b/src/dom/document_fragment.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/document_type.zig b/src/dom/document_type.zig
index 23c667235..cd40a7321 100644
--- a/src/dom/document_type.zig
+++ b/src/dom/document_type.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Node = @import("node.zig").Node;
diff --git a/src/dom/element.zig b/src/dom/element.zig
index fc77d95ee..8ef456a7a 100644
--- a/src/dom/element.zig
+++ b/src/dom/element.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/event_target.zig b/src/dom/event_target.zig
index 2dd7dcaee..23444bfca 100644
--- a/src/dom/event_target.zig
+++ b/src/dom/event_target.zig
@@ -24,7 +24,8 @@ const JSObjectID = jsruntime.JSObjectID;
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
+const EventHandler = @import("../events/event.zig").EventHandler;
const DOMException = @import("exceptions.zig").DOMException;
const Nod = @import("node.zig");
@@ -74,6 +75,7 @@ pub const EventTarget = struct {
eventType,
cbk,
capture orelse false,
+ EventHandler,
);
}
diff --git a/src/dom/exceptions.zig b/src/dom/exceptions.zig
index ba3952a4a..209f4345d 100644
--- a/src/dom/exceptions.zig
+++ b/src/dom/exceptions.zig
@@ -23,7 +23,7 @@ const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
// https://webidl.spec.whatwg.org/#idl-DOMException
pub const DOMException = struct {
diff --git a/src/dom/html_collection.zig b/src/dom/html_collection.zig
index 2565d0dda..298d6b72a 100644
--- a/src/dom/html_collection.zig
+++ b/src/dom/html_collection.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/implementation.zig b/src/dom/implementation.zig
index e4fff404b..ec90014f2 100644
--- a/src/dom/implementation.zig
+++ b/src/dom/implementation.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/namednodemap.zig b/src/dom/namednodemap.zig
index 0e205d06f..67840659d 100644
--- a/src/dom/namednodemap.zig
+++ b/src/dom/namednodemap.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/node.zig b/src/dom/node.zig
index 5dd0c811d..f5f0ade25 100644
--- a/src/dom/node.zig
+++ b/src/dom/node.zig
@@ -26,7 +26,7 @@ const Variadic = jsruntime.Variadic;
const generate = @import("../generate.zig");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const EventTarget = @import("event_target.zig").EventTarget;
diff --git a/src/dom/nodelist.zig b/src/dom/nodelist.zig
index 1c8c2b612..be4019023 100644
--- a/src/dom/nodelist.zig
+++ b/src/dom/nodelist.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/processing_instruction.zig b/src/dom/processing_instruction.zig
index 3e23e11b0..fc932ec5d 100644
--- a/src/dom/processing_instruction.zig
+++ b/src/dom/processing_instruction.zig
@@ -22,7 +22,7 @@ const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Node = @import("node.zig").Node;
// https://dom.spec.whatwg.org/#processinginstruction
diff --git a/src/dom/text.zig b/src/dom/text.zig
index 4ac35ed55..39c22c47d 100644
--- a/src/dom/text.zig
+++ b/src/dom/text.zig
@@ -23,7 +23,7 @@ const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
const generate = @import("../generate.zig");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const CharacterData = @import("character_data.zig").CharacterData;
const CDATASection = @import("cdata_section.zig").CDATASection;
diff --git a/src/dom/token_list.zig b/src/dom/token_list.zig
index c046cc4ff..0ed759979 100644
--- a/src/dom/token_list.zig
+++ b/src/dom/token_list.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/dom/walker.zig b/src/dom/walker.zig
index ad7ba5f75..6f2c2fbab 100644
--- a/src/dom/walker.zig
+++ b/src/dom/walker.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
pub const Walker = union(enum) {
walkerDepthFirst: WalkerDepthFirst,
diff --git a/src/events/event.zig b/src/events/event.zig
index 5a2955136..784b2d77f 100644
--- a/src/events/event.zig
+++ b/src/events/event.zig
@@ -22,10 +22,11 @@ const generate = @import("../generate.zig");
const jsruntime = @import("jsruntime");
const Callback = jsruntime.Callback;
+const CallbackResult = jsruntime.CallbackResult;
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const DOMException = @import("../dom/exceptions.zig").DOMException;
const EventTarget = @import("../dom/event_target.zig").EventTarget;
@@ -33,6 +34,8 @@ const EventTargetUnion = @import("../dom/event_target.zig").Union;
const ProgressEvent = @import("../xhr/progress_event.zig").ProgressEvent;
+const log = std.log.scoped(.events);
+
// Event interfaces
pub const Interfaces = generate.Tuple(.{
Event,
@@ -236,3 +239,33 @@ pub fn testExecFn(
};
try checkCases(js_env, &remove);
}
+
+pub const EventHandler = struct {
+ fn handle(event: ?*parser.Event, data: ?*anyopaque) callconv(.C) void {
+ if (data) |d| {
+ const func = parser.event_handler_cbk(d);
+
+ // TODO get the allocator by another way?
+ var res = CallbackResult.init(func.nat_ctx.alloc);
+ defer res.deinit();
+
+ if (event) |evt| {
+ func.trycall(.{
+ Event.toInterface(evt) catch unreachable,
+ }, &res) catch |e| log.err("event handler error: {any}", .{e});
+ } else {
+ func.trycall(.{event}, &res) catch |e| log.err("event handler error: {any}", .{e});
+ }
+
+ // in case of function error, we log the result and the trace.
+ if (!res.success) {
+ log.info("event handler error: {s}", .{res.result orelse "unknown"});
+ log.debug("{s}", .{res.stack orelse "no stack trace"});
+ }
+
+ // NOTE: we can not call func.deinit here
+ // b/c the handler can be called several times
+        // either on this dispatch event or in another one
+ }
+ }
+}.handle;
diff --git a/src/generate.zig b/src/generate.zig
index 252ea4f0a..8323d7d61 100644
--- a/src/generate.zig
+++ b/src/generate.zig
@@ -35,9 +35,9 @@ fn itoa(comptime i: u8) ![]const u8 {
return try std.fmt.bufPrint(buf[0..], "{d}", .{i});
}
-fn fmtName(comptime T: type) []const u8 {
+fn fmtName(comptime T: type) [:0]const u8 {
var it = std.mem.splitBackwards(u8, @typeName(T), ".");
- return it.first();
+ return it.first() ++ "";
}
// Union
@@ -168,7 +168,11 @@ pub const Union = struct {
T = *T;
}
union_fields[done] = .{
- .name = fmtName(member_T),
+ // UnionField.name expect a null terminated string.
+ // concatenate the `[]const u8` string with an empty string
+ // literal (`name ++ ""`) to explicitly coerce it to `[:0]const
+ // u8`.
+ .name = fmtName(member_T) ++ "",
.type = T,
.alignment = @alignOf(T),
};
@@ -176,7 +180,7 @@ pub const Union = struct {
}
}
const union_info = std.builtin.Type.Union{
- .layout = .Auto,
+ .layout = .auto,
.tag_type = enum_T,
.fields = &union_fields,
.decls = &decls,
@@ -286,7 +290,11 @@ fn TupleT(comptime tuple: anytype) type {
continue;
}
fields[done] = .{
- .name = try itoa(done),
+ // StructField.name expect a null terminated string.
+ // concatenate the `[]const u8` string with an empty string
+ // literal (`name ++ ""`) to explicitly coerce it to `[:0]const
+ // u8`.
+ .name = try itoa(done) ++ "",
.type = type,
.default_value = null,
.is_comptime = false,
@@ -296,7 +304,7 @@ fn TupleT(comptime tuple: anytype) type {
}
const decls: [0]std.builtin.Type.Declaration = undefined;
const info = std.builtin.Type.Struct{
- .layout = .Auto,
+ .layout = .auto,
.fields = &fields,
.decls = &decls,
.is_tuple = true,
diff --git a/src/html/document.zig b/src/html/document.zig
index f3f2ad60c..3b53e11e0 100644
--- a/src/html/document.zig
+++ b/src/html/document.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
diff --git a/src/html/elements.zig b/src/html/elements.zig
index 6b6ad83f6..dacce92e5 100644
--- a/src/html/elements.zig
+++ b/src/html/elements.zig
@@ -17,7 +17,7 @@
// along with this program. If not, see .
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const generate = @import("../generate.zig");
const jsruntime = @import("jsruntime");
@@ -246,10 +246,10 @@ pub const HTMLAnchorElement = struct {
defer u.deinit(alloc);
if (p) |pp| {
- u.uri.host = h;
+ u.uri.host = .{ .raw = h };
u.uri.port = pp;
} else {
- u.uri.host = v;
+ u.uri.host = .{ .raw = v };
u.uri.port = null;
}
@@ -271,7 +271,7 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.host = v;
+ u.uri.host = .{ .raw = v };
const href = try u.format(alloc);
try parser.anchorSetHref(self, href);
}
@@ -312,7 +312,11 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.user = v;
+ if (v) |vv| {
+ u.uri.user = .{ .raw = vv };
+ } else {
+ u.uri.user = null;
+ }
const href = try u.format(alloc);
defer alloc.free(href);
@@ -331,7 +335,11 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.password = v;
+ if (v) |vv| {
+ u.uri.password = .{ .raw = vv };
+ } else {
+ u.uri.password = null;
+ }
const href = try u.format(alloc);
defer alloc.free(href);
@@ -350,7 +358,7 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.path = v;
+ u.uri.path = .{ .raw = v };
const href = try u.format(alloc);
defer alloc.free(href);
@@ -369,7 +377,11 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.query = v;
+ if (v) |vv| {
+ u.uri.query = .{ .raw = vv };
+ } else {
+ u.uri.query = null;
+ }
const href = try u.format(alloc);
defer alloc.free(href);
@@ -388,7 +400,11 @@ pub const HTMLAnchorElement = struct {
var u = try url(self, alloc);
defer u.deinit(alloc);
- u.uri.fragment = v;
+ if (v) |vv| {
+ u.uri.fragment = .{ .raw = vv };
+ } else {
+ u.uri.fragment = null;
+ }
const href = try u.format(alloc);
defer alloc.free(href);
diff --git a/src/html/window.zig b/src/html/window.zig
index cd3c74a05..5a34ae061 100644
--- a/src/html/window.zig
+++ b/src/html/window.zig
@@ -18,7 +18,7 @@
const std = @import("std");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const EventTarget = @import("../dom/event_target.zig").EventTarget;
diff --git a/src/main.zig b/src/main.zig
index ab4fb5858..a402dc48a 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -20,7 +20,7 @@ const std = @import("std");
const jsruntime = @import("jsruntime");
-const parser = @import("netsurf.zig");
+const parser = @import("netsurf");
const apiweb = @import("apiweb.zig");
const Window = @import("html/window.zig").Window;
@@ -30,7 +30,7 @@ pub const UserContext = apiweb.UserContext;
const socket_path = "/tmp/browsercore-server.sock";
var doc: *parser.DocumentHTML = undefined;
-var server: std.net.StreamServer = undefined;
+var server: std.net.Server = undefined;
fn execJS(
alloc: std.mem.Allocator,
@@ -91,7 +91,7 @@ pub fn main() !void {
// reuse_address (SO_REUSEADDR flag) does not seems to work on unix socket
// see: https://gavv.net/articles/unix-socket-reuse/
// TODO: use a lock file instead
- std.os.unlink(socket_path) catch |err| {
+ std.posix.unlink(socket_path) catch |err| {
if (err != error.FileNotFound) {
return err;
}
@@ -99,9 +99,8 @@ pub fn main() !void {
// server
const addr = try std.net.Address.initUnix(socket_path);
- server = std.net.StreamServer.init(.{});
+ server = try addr.listen(.{});
defer server.deinit();
- try server.listen(addr);
std.debug.print("Listening on: {s}...\n", .{socket_path});
try jsruntime.loadEnv(&arena, null, execJS);
diff --git a/src/main_get.zig b/src/main_get.zig
index 3f9714fa0..6f4555536 100644
--- a/src/main_get.zig
+++ b/src/main_get.zig
@@ -25,8 +25,8 @@ const apiweb = @import("apiweb.zig");
pub const Types = jsruntime.reflect(apiweb.Interfaces);
pub const UserContext = apiweb.UserContext;
-pub const std_options = struct {
- pub const log_level = .debug;
+pub const std_options = std.Options{
+ .log_level = .debug,
};
const usage =
@@ -58,7 +58,7 @@ pub fn main() !void {
while (args.next()) |arg| {
if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
try std.io.getStdErr().writer().print(usage, .{execname});
- std.os.exit(0);
+ std.posix.exit(0);
}
if (std.mem.eql(u8, "--dump", arg)) {
dump = true;
@@ -67,14 +67,14 @@ pub fn main() !void {
// allow only one url
if (url.len != 0) {
try std.io.getStdErr().writer().print(usage, .{execname});
- std.os.exit(1);
+ std.posix.exit(1);
}
url = arg;
}
if (url.len == 0) {
try std.io.getStdErr().writer().print(usage, .{execname});
- std.os.exit(1);
+ std.posix.exit(1);
}
const vm = jsruntime.VM.init();
diff --git a/src/main_shell.zig b/src/main_shell.zig
index 6eb6c11aa..f766bdd71 100644
--- a/src/main_shell.zig
+++ b/src/main_shell.zig
@@ -20,7 +20,7 @@ const std = @import("std");
const jsruntime = @import("jsruntime");
-const parser = @import("netsurf.zig");
+const parser = @import("netsurf");
const apiweb = @import("apiweb.zig");
const Window = @import("html/window.zig").Window;
const storage = @import("storage/storage.zig");
diff --git a/src/main_wpt.zig b/src/main_wpt.zig
index bc9927fda..6dd63b2d2 100644
--- a/src/main_wpt.zig
+++ b/src/main_wpt.zig
@@ -76,7 +76,7 @@ pub fn main() !void {
while (args.next()) |arg| {
if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
try std.io.getStdErr().writer().print(usage, .{execname});
- std.os.exit(0);
+ std.posix.exit(0);
}
if (std.mem.eql(u8, "--json", arg)) {
out = .json;
@@ -214,12 +214,12 @@ pub fn main() !void {
}
try std.json.stringify(output.items, .{ .whitespace = .indent_2 }, std.io.getStdOut().writer());
- std.os.exit(0);
+ std.posix.exit(0);
}
if (out == .text and failures > 0) {
std.debug.print("{d}/{d} tests suites failures\n", .{ failures, run });
- std.os.exit(1);
+ std.posix.exit(1);
}
}
diff --git a/src/mimalloc.zig b/src/mimalloc/mimalloc.zig
similarity index 100%
rename from src/mimalloc.zig
rename to src/mimalloc/mimalloc.zig
diff --git a/src/netsurf.zig b/src/netsurf/netsurf.zig
similarity index 97%
rename from src/netsurf.zig
rename to src/netsurf/netsurf.zig
index aff0e1401..d7e646489 100644
--- a/src/netsurf.zig
+++ b/src/netsurf/netsurf.zig
@@ -26,13 +26,9 @@ const c = @cImport({
@cInclude("events/event.h");
});
-const mimalloc = @import("mimalloc.zig");
+const mimalloc = @import("mimalloc");
const Callback = @import("jsruntime").Callback;
-const CallbackResult = @import("jsruntime").CallbackResult;
-const EventToInterface = @import("events/event.zig").Event.toInterface;
-
-const log = std.log.scoped(.netsurf);
// init initializes netsurf lib.
// init starts a mimalloc heap arena for the netsurf session. The caller must
@@ -265,8 +261,8 @@ pub const Tag = enum(u8) {
pub fn all() []Tag {
comptime {
const info = @typeInfo(Tag).Enum;
- comptime var l: [info.fields.len]Tag = undefined;
- inline for (info.fields, 0..) |field, i| {
+ var l: [info.fields.len]Tag = undefined;
+ for (info.fields, 0..) |field, i| {
l[i] = @as(Tag, @enumFromInt(field.value));
}
return &l;
@@ -277,7 +273,7 @@ pub const Tag = enum(u8) {
comptime {
const tags = all();
var names: [tags.len][]const u8 = undefined;
- inline for (tags, 0..) |tag, i| {
+ for (tags, 0..) |tag, i| {
names[i] = tag.elementName();
}
return &names;
@@ -527,41 +523,11 @@ pub const EventType = enum(u8) {
};
// EventHandler
-fn event_handler_cbk(data: *anyopaque) *Callback {
+pub fn event_handler_cbk(data: *anyopaque) *Callback {
const ptr: *align(@alignOf(*Callback)) anyopaque = @alignCast(data);
return @as(*Callback, @ptrCast(ptr));
}
-const event_handler = struct {
- fn handle(event: ?*Event, data: ?*anyopaque) callconv(.C) void {
- if (data) |d| {
- const func = event_handler_cbk(d);
-
- // TODO get the allocator by another way?
- var res = CallbackResult.init(func.nat_ctx.alloc);
- defer res.deinit();
-
- if (event) |evt| {
- func.trycall(.{
- EventToInterface(evt) catch unreachable,
- }, &res) catch {};
- } else {
- func.trycall(.{event}, &res) catch {};
- }
-
- // in case of function error, we log the result and the trace.
- if (!res.success) {
- log.info("event handler error: {s}", .{res.result orelse "unknown"});
- log.debug("{s}", .{res.stack orelse "no stack trace"});
- }
-
- // NOTE: we can not call func.deinit here
- // b/c the handler can be called several times
- // either on this dispatch event or in anoter one
- }
- }
-}.handle;
-
// EventListener
pub const EventListener = c.dom_event_listener;
const EventListenerEntry = c.listener_entry;
@@ -642,12 +608,15 @@ pub fn eventTargetHasListener(
return null;
}
+const EventHandler = fn (event: ?*Event, data: ?*anyopaque) callconv(.C) void;
+
pub fn eventTargetAddEventListener(
et: *EventTarget,
alloc: std.mem.Allocator,
typ: []const u8,
cbk: Callback,
capture: bool,
+ handler: EventHandler,
) !void {
// this allocation will be removed either on
// eventTargetRemoveEventListener or eventTargetRemoveAllEventListeners
@@ -661,7 +630,7 @@ pub fn eventTargetAddEventListener(
const ctx = @as(*anyopaque, @ptrCast(cbk_ptr));
var listener: ?*EventListener = undefined;
- const errLst = c.dom_event_listener_create(event_handler, ctx, &listener);
+ const errLst = c.dom_event_listener_create(handler, ctx, &listener);
try DOMErr(errLst);
defer c.dom_event_listener_unref(listener);
diff --git a/src/run_tests.zig b/src/run_tests.zig
index 700d15c06..bf9067335 100644
--- a/src/run_tests.zig
+++ b/src/run_tests.zig
@@ -23,7 +23,7 @@ const jsruntime = @import("jsruntime");
const generate = @import("generate.zig");
const pretty = @import("pretty");
-const parser = @import("netsurf.zig");
+const parser = @import("netsurf");
const apiweb = @import("apiweb.zig");
const Window = @import("html/window.zig").Window;
const xhr = @import("xhr/xhr.zig");
@@ -182,7 +182,7 @@ pub fn main() !void {
while (args.next()) |arg| {
if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
try std.io.getStdErr().writer().print(usage, .{});
- std.os.exit(0);
+ std.posix.exit(0);
}
if (std.mem.eql(u8, "--json", arg)) {
out = .json;
diff --git a/src/storage/storage.zig b/src/storage/storage.zig
index 0905eec07..52b467d22 100644
--- a/src/storage/storage.zig
+++ b/src/storage/storage.zig
@@ -23,7 +23,7 @@ const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
const generate = @import("../generate.zig");
-const DOMError = @import("../netsurf.zig").DOMError;
+const DOMError = @import("netsurf").DOMError;
const log = std.log.scoped(.storage);
diff --git a/src/url/url.zig b/src/url/url.zig
index 978a11b77..bea44839c 100644
--- a/src/url/url.zig
+++ b/src/url/url.zig
@@ -62,7 +62,10 @@ pub const URL = struct {
return .{
.rawuri = raw,
.uri = uri,
- .search_params = try URLSearchParams.constructor(alloc, uri.query),
+ .search_params = try URLSearchParams.constructor(
+ alloc,
+ uriComponentNullStr(uri.query),
+ ),
};
}
@@ -102,7 +105,7 @@ pub const URL = struct {
var q = std.ArrayList(u8).init(alloc);
defer q.deinit();
try self.search_params.values.encode(q.writer());
- self.uri.query = q.items;
+ self.uri.query = .{ .percent_encoded = q.items };
return try self.format(alloc);
}
@@ -116,9 +119,9 @@ pub const URL = struct {
.scheme = true,
.authentication = true,
.authority = true,
- .path = self.uri.path.len > 0,
- .query = self.uri.query != null and self.uri.query.?.len > 0,
- .fragment = self.uri.fragment != null and self.uri.fragment.?.len > 0,
+ .path = uriComponentNullStr(self.uri.path).len > 0,
+ .query = uriComponentNullStr(self.uri.query).len > 0,
+ .fragment = uriComponentNullStr(self.uri.fragment).len > 0,
}, buf.writer());
return try buf.toOwnedSlice();
}
@@ -131,11 +134,11 @@ pub const URL = struct {
}
pub fn get_username(self: *URL) []const u8 {
- return self.uri.user orelse "";
+ return uriComponentNullStr(self.uri.user);
}
pub fn get_password(self: *URL) []const u8 {
- return self.uri.password orelse "";
+ return uriComponentNullStr(self.uri.password);
}
// the caller must free the returned string.
@@ -157,7 +160,7 @@ pub const URL = struct {
}
pub fn get_hostname(self: *URL) []const u8 {
- return self.uri.host orelse "";
+ return uriComponentNullStr(self.uri.host);
}
// the caller must free the returned string.
@@ -174,8 +177,8 @@ pub const URL = struct {
}
pub fn get_pathname(self: *URL) []const u8 {
- if (self.uri.path.len == 0) return "/";
- return self.uri.path;
+ if (uriComponentStr(self.uri.path).len == 0) return "/";
+ return uriComponentStr(self.uri.path);
}
// the caller must free the returned string.
@@ -198,7 +201,7 @@ pub const URL = struct {
pub fn get_hash(self: *URL, alloc: std.mem.Allocator) ![]const u8 {
if (self.uri.fragment == null) return try alloc.dupe(u8, "");
- return try std.mem.concat(alloc, u8, &[_][]const u8{ "#", self.uri.fragment.? });
+ return try std.mem.concat(alloc, u8, &[_][]const u8{ "#", uriComponentNullStr(self.uri.fragment) });
}
pub fn get_searchParams(self: *URL) *URLSearchParams {
@@ -210,6 +213,21 @@ pub const URL = struct {
}
};
+// uriComponentNullStr converts an optional std.Uri.Component to string value.
+// The string value can be undecoded.
+fn uriComponentNullStr(c: ?std.Uri.Component) []const u8 {
+ if (c == null) return "";
+
+ return uriComponentStr(c.?);
+}
+
+fn uriComponentStr(c: std.Uri.Component) []const u8 {
+ return switch (c) {
+ .raw => |v| v,
+ .percent_encoded => |v| v,
+ };
+}
+
// https://url.spec.whatwg.org/#interface-urlsearchparams
// TODO array like
pub const URLSearchParams = struct {
diff --git a/src/user_context.zig b/src/user_context.zig
index 4860100d3..23d85955f 100644
--- a/src/user_context.zig
+++ b/src/user_context.zig
@@ -1,5 +1,5 @@
const std = @import("std");
-const parser = @import("netsurf.zig");
+const parser = @import("netsurf");
const Client = @import("async/Client.zig");
pub const UserContext = struct {
diff --git a/src/wpt/run.zig b/src/wpt/run.zig
index 3625fdf70..09586db3f 100644
--- a/src/wpt/run.zig
+++ b/src/wpt/run.zig
@@ -21,7 +21,7 @@ const fspath = std.fs.path;
const FileLoader = @import("fileloader.zig").FileLoader;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const jsruntime = @import("jsruntime");
const Loop = jsruntime.Loop;
diff --git a/src/xhr/event_target.zig b/src/xhr/event_target.zig
index 765eb3ecf..ab4e7683c 100644
--- a/src/xhr/event_target.zig
+++ b/src/xhr/event_target.zig
@@ -22,8 +22,9 @@ const jsruntime = @import("jsruntime");
const Callback = jsruntime.Callback;
const EventTarget = @import("../dom/event_target.zig").EventTarget;
+const EventHandler = @import("../events/event.zig").EventHandler;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const log = std.log.scoped(.xhr);
@@ -41,8 +42,20 @@ pub const XMLHttpRequestEventTarget = struct {
ontimeout_cbk: ?Callback = null,
onloadend_cbk: ?Callback = null,
- fn register(self: *XMLHttpRequestEventTarget, alloc: std.mem.Allocator, typ: []const u8, cbk: Callback) !void {
- try parser.eventTargetAddEventListener(@as(*parser.EventTarget, @ptrCast(self)), alloc, typ, cbk, false);
+ fn register(
+ self: *XMLHttpRequestEventTarget,
+ alloc: std.mem.Allocator,
+ typ: []const u8,
+ cbk: Callback,
+ ) !void {
+ try parser.eventTargetAddEventListener(
+ @as(*parser.EventTarget, @ptrCast(self)),
+ alloc,
+ typ,
+ cbk,
+ false,
+ EventHandler,
+ );
}
fn unregister(self: *XMLHttpRequestEventTarget, alloc: std.mem.Allocator, typ: []const u8, cbk: Callback) !void {
const et = @as(*parser.EventTarget, @ptrCast(self));
diff --git a/src/xhr/progress_event.zig b/src/xhr/progress_event.zig
index 832fb6f90..d985c76fb 100644
--- a/src/xhr/progress_event.zig
+++ b/src/xhr/progress_event.zig
@@ -22,7 +22,7 @@ const jsruntime = @import("jsruntime");
const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const Event = @import("../events/event.zig").Event;
const DOMException = @import("../dom/exceptions.zig").DOMException;
diff --git a/src/xhr/xhr.zig b/src/xhr/xhr.zig
index e395144a7..d9e44f1c3 100644
--- a/src/xhr/xhr.zig
+++ b/src/xhr/xhr.zig
@@ -23,7 +23,7 @@ const Case = jsruntime.test_utils.Case;
const checkCases = jsruntime.test_utils.checkCases;
const generate = @import("../generate.zig");
-const DOMError = @import("../netsurf.zig").DOMError;
+const DOMError = @import("netsurf").DOMError;
const DOMException = @import("../dom/exceptions.zig").DOMException;
const ProgressEvent = @import("progress_event.zig").ProgressEvent;
@@ -35,7 +35,7 @@ const Loop = jsruntime.Loop;
const YieldImpl = Loop.Yield(XMLHttpRequest);
const Client = @import("../async/Client.zig");
-const parser = @import("../netsurf.zig");
+const parser = @import("netsurf");
const UserContext = @import("../user_context.zig").UserContext;
@@ -95,6 +95,50 @@ pub const XMLHttpRequestBodyInit = union(XMLHttpRequestBodyInitTag) {
};
pub const XMLHttpRequest = struct {
+ proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
+ alloc: std.mem.Allocator,
+ cli: *Client,
+ impl: YieldImpl,
+
+ priv_state: PrivState = .new,
+ req: ?Client.Request = null,
+
+ method: std.http.Method,
+ state: u16,
+ url: ?[]const u8,
+ uri: std.Uri,
+ // request headers
+ headers: Headers,
+ sync: bool = true,
+ err: ?anyerror = null,
+
+ // TODO uncomment this field causes casting issue with
+    // XMLHttpRequestEventTarget. I think it's due to an alignment issue, but
+ // not sure. see
+ // https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
+ // upload: ?XMLHttpRequestUpload = null,
+
+ // TODO uncomment this field causes casting issue with
+    // XMLHttpRequestEventTarget. I think it's due to an alignment issue, but
+ // not sure. see
+ // https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
+ // timeout: u32 = 0,
+
+ withCredentials: bool = false,
+ // TODO: response readonly attribute any response;
+ response_bytes: ?[]const u8 = null,
+ response_type: ResponseType = .Empty,
+ response_headers: Headers,
+    // used by zig client to parse response headers.
+ response_header_buffer: [1024]u8 = undefined,
+ response_status: u10 = 0,
+ response_override_mime_type: ?[]const u8 = null,
+ response_mime: Mime = undefined,
+ response_obj: ?ResponseObj = null,
+ send_flag: bool = false,
+
+ payload: ?[]const u8 = null,
+
pub const prototype = *XMLHttpRequestEventTarget;
pub const mem_guarantied = true;
@@ -116,6 +160,91 @@ pub const XMLHttpRequest = struct {
const JSONValue = std.json.Value;
+ const Headers = struct {
+ alloc: std.mem.Allocator,
+ list: List,
+
+ const List = std.ArrayListUnmanaged(std.http.Header);
+
+ fn init(alloc: std.mem.Allocator) Headers {
+ return .{
+ .alloc = alloc,
+ .list = List{},
+ };
+ }
+
+ fn deinit(self: *Headers) void {
+ self.free();
+ self.list.deinit(self.alloc);
+ }
+
+ fn append(self: *Headers, k: []const u8, v: []const u8) !void {
+ // duplicate strings
+ const kk = try self.alloc.dupe(u8, k);
+ const vv = try self.alloc.dupe(u8, v);
+ try self.list.append(self.alloc, .{ .name = kk, .value = vv });
+ }
+
+ // free all strings allocated.
+ fn free(self: *Headers) void {
+ for (self.list.items) |h| {
+ self.alloc.free(h.name);
+ self.alloc.free(h.value);
+ }
+ }
+
+ fn clearAndFree(self: *Headers) void {
+ self.free();
+ self.list.clearAndFree(self.alloc);
+ }
+
+ fn has(self: Headers, k: []const u8) bool {
+ for (self.list.items) |h| {
+ if (std.ascii.eqlIgnoreCase(k, h.name)) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ fn getFirstValue(self: Headers, k: []const u8) ?[]const u8 {
+ for (self.list.items) |h| {
+ if (std.ascii.eqlIgnoreCase(k, h.name)) {
+ return h.value;
+ }
+ }
+
+ return null;
+ }
+
+ // replace any existing header with the same key
+        fn set(self: *Headers, k: []const u8, v: []const u8) !void {
+            var i = self.list.items.len;
+            while (i > 0) : (i -= 1) {
+                if (!std.ascii.eqlIgnoreCase(k, self.list.items[i - 1].name)) continue;
+                const hh = self.list.swapRemove(i - 1);
+                self.alloc.free(hh.name);
+                self.alloc.free(hh.value);
+            }
+            try self.append(k, v);
+        }
+
+ // TODO
+ fn sort(_: *Headers) void {}
+
+ fn all(self: Headers) []std.http.Header {
+ return self.list.items;
+ }
+
+ fn load(self: *Headers, it: *std.http.HeaderIterator) !void {
+ while (true) {
+ const h = it.next() orelse break;
+                try self.append(h.name, h.value);
+ }
+ }
+ };
+
const Response = union(ResponseType) {
Empty: void,
Text: []const u8,
@@ -149,49 +278,13 @@ pub const XMLHttpRequest = struct {
const PrivState = enum { new, open, send, write, finish, wait, done };
- proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
- alloc: std.mem.Allocator,
- cli: *Client,
- impl: YieldImpl,
-
- priv_state: PrivState = .new,
- req: ?Client.Request = null,
-
- method: std.http.Method,
- state: u16,
- url: ?[]const u8,
- uri: std.Uri,
- headers: std.http.Headers,
- sync: bool = true,
- err: ?anyerror = null,
-
- // TODO uncomment this field causes casting issue with
- // XMLHttpRequestEventTarget. I think it's dueto an alignement issue, but
- // not sure. see
- // https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
- // upload: ?XMLHttpRequestUpload = null,
-
- timeout: u32 = 0,
- withCredentials: bool = false,
- // TODO: response readonly attribute any response;
- response_bytes: ?[]const u8 = null,
- response_type: ResponseType = .Empty,
- response_headers: std.http.Headers,
- response_status: u10 = 0,
- response_override_mime_type: ?[]const u8 = null,
- response_mime: Mime = undefined,
- response_obj: ?ResponseObj = null,
- send_flag: bool = false,
-
- payload: ?[]const u8 = null,
-
const min_delay: u64 = 50000000; // 50ms
pub fn constructor(alloc: std.mem.Allocator, loop: *Loop, userctx: UserContext) !XMLHttpRequest {
return .{
.alloc = alloc,
- .headers = .{ .allocator = alloc, .owned = true },
- .response_headers = .{ .allocator = alloc, .owned = true },
+ .headers = Headers.init(alloc),
+ .response_headers = Headers.init(alloc),
.impl = YieldImpl.init(loop),
.method = undefined,
.url = null,
@@ -242,16 +335,16 @@ pub const XMLHttpRequest = struct {
return self.state;
}
- pub fn get_timeout(self: *XMLHttpRequest) u32 {
- return self.timeout;
+ pub fn get_timeout(_: *XMLHttpRequest) u32 {
+ return 0;
}
- pub fn set_timeout(self: *XMLHttpRequest, timeout: u32) !void {
+ // TODO, the value is ignored for now.
+ pub fn set_timeout(_: *XMLHttpRequest, _: u32) !void {
// TODO If the current global object is a Window object and this’s
// synchronous flag is set, then throw an "InvalidAccessError"
// DOMException.
// https://xhr.spec.whatwg.org/#dom-xmlhttprequest-timeout
- self.timeout = timeout;
}
pub fn get_withCredentials(self: *XMLHttpRequest) bool {
@@ -385,7 +478,7 @@ pub const XMLHttpRequest = struct {
const body_init = XMLHttpRequestBodyInit{ .String = body.? };
// keep the user content type from request headers.
- if (self.headers.getFirstEntry("Content-Type") == null) {
+        if (!self.headers.has("Content-Type")) {
// https://fetch.spec.whatwg.org/#bodyinit-safely-extract
try self.headers.append("Content-Type", try body_init.contentType());
}
@@ -411,14 +504,17 @@ pub const XMLHttpRequest = struct {
switch (self.priv_state) {
.new => {
self.priv_state = .open;
- self.req = self.cli.open(self.method, self.uri, self.headers, .{}) catch |e| return self.onErr(e);
+ self.req = self.cli.open(self.method, self.uri, .{
+ .server_header_buffer = &self.response_header_buffer,
+ .extra_headers = self.headers.all(),
+ }) catch |e| return self.onErr(e);
},
.open => {
// prepare payload transfert.
if (self.payload) |v| self.req.?.transfer_encoding = .{ .content_length = v.len };
self.priv_state = .send;
- self.req.?.send(.{}) catch |e| return self.onErr(e);
+ self.req.?.send() catch |e| return self.onErr(e);
},
.send => {
if (self.payload) |payload| {
@@ -441,7 +537,8 @@ pub const XMLHttpRequest = struct {
log.info("{any} {any} {d}", .{ self.method, self.uri, self.req.?.response.status });
self.priv_state = .done;
- self.response_headers = self.req.?.response.headers.clone(self.response_headers.allocator) catch |e| return self.onErr(e);
+ var it = self.req.?.response.iterateHeaders();
+ self.response_headers.load(&it) catch |e| return self.onErr(e);
// extract a mime type from headers.
const ct = self.response_headers.getFirstValue("Content-Type") orelse "text/xml";
diff --git a/vendor/zig-js-runtime b/vendor/zig-js-runtime
index bb0160936..d491f0414 160000
--- a/vendor/zig-js-runtime
+++ b/vendor/zig-js-runtime
@@ -1 +1 @@
-Subproject commit bb01609365509322e340e8a7cfa06e67e0a8478b
+Subproject commit d491f041407c6a85d9871bdaca3effb204e5d399