9 changes: 7 additions & 2 deletions build.zig
@@ -81,12 +81,13 @@ pub fn build(b: *std.Build) !void {
docs_step.dependOn(langref_step);
docs_step.dependOn(std_docs_step);

const test_default_only = b.option(bool, "test-default-only", "Limit test matrix to exactly one target configuration") orelse false;
const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse test_default_only;
const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
const skip_release_fast = b.option(bool, "skip-release-fast", "Main test suite skips release-fast builds") orelse skip_release;
const skip_release_safe = b.option(bool, "skip-release-safe", "Main test suite skips release-safe builds") orelse skip_release;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse test_default_only;
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
@@ -449,6 +450,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@@ -471,6 +473,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@@ -492,6 +495,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
@@ -513,6 +517,7 @@ pub fn build(b: *std.Build) !void {
.include_paths = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.test_default_only = test_default_only,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
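The new test-default-only flag works purely through `orelse` chaining: it only supplies the default for skip-release and skip-non-native, so an explicit -Dskip-release=false on the command line still overrides it. A minimal standalone sketch of the pattern (a toy build.zig, not the real one):

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Umbrella flag: when set, it becomes the *default* for the narrower
    // skip flags below, but an explicit -Dskip-release=false still wins.
    const test_default_only = b.option(bool, "test-default-only", "Limit test matrix to one configuration") orelse false;
    const skip_release = b.option(bool, "skip-release", "Skip release builds") orelse test_default_only;
    const skip_non_native = b.option(bool, "skip-non-native", "Skip non-native builds") orelse test_default_only;
    std.debug.print("skip_release={}, skip_non_native={}\n", .{ skip_release, skip_non_native });
}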
109 changes: 72 additions & 37 deletions lib/std/Io/Reader.zig
@@ -481,7 +481,6 @@ pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peek(r: *Reader, n: usize) Error![]u8 {
try r.fill(n);
@@ -732,7 +731,7 @@ pub const DelimiterError = error{
};

/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position.
/// `sentinel` is found, advancing the seek position past the sentinel.
///
/// Returned slice has a sentinel.
///
@@ -765,7 +764,7 @@ pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice includes the delimiter as the last byte.
///
@@ -793,32 +792,42 @@ pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const buffer = r.buffer[0..r.end];
const seek = r.seek;
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |delimiter_index| {
@branchHint(.likely);
return buffer[seek .. delimiter_index + 1];
{
const contents = r.buffer[0..r.end];
const seek = r.seek;
if (std.mem.findScalarPos(u8, contents, seek, delimiter)) |end| {
@branchHint(.likely);
return contents[seek .. end + 1];
}
}
// TODO take a parameter for max search length rather than relying on buffer capacity
try rebase(r, r.buffer.len);
while (r.buffer.len - r.end != 0) {
const existing_buffered_len = r.end - r.seek;
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
r.end += n;
if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], r.seek + existing_buffered_len, delimiter)) |delimiter_index| {
return r.buffer[r.seek .. delimiter_index + 1];
while (true) {
const content_len = r.end - r.seek;
if (r.buffer.len - content_len == 0) break;
try fillMore(r);
const seek = r.seek;
const contents = r.buffer[0..r.end];
if (std.mem.findScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
return contents[seek .. end + 1];
}
}
return error.StreamTooLong;
// It might or might not be end of stream. There is no more buffer space
// left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
// this logic could be replaced by removing the exit condition from the
// above while loop. That error code would represent when `buffer` capacity
// is too small for an operation, replacing the current use of asserts.
var failing_writer = Writer.failing;
while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
assert(n == 0);
} else |err| switch (err) {
error.WriteFailed => return error.StreamTooLong,
error.ReadFailed => |e| return e,
error.EndOfStream => |e| return e,
}
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position up to the delimiter.
/// `delimiter` is found, advancing the seek position up to (but not past)
/// the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
@@ -832,20 +841,13 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiter`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return error.EndOfStream;
r.toss(remaining.len);
return remaining;
},
else => |e| return e,
};
const result = try r.peekDelimiterExclusive(delimiter);
r.toss(result.len);
return result[0 .. result.len - 1];
return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
@@ -866,7 +868,7 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `takeDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
const inclusive = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return null;
Expand All @@ -875,8 +877,8 @@ pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong
},
else => |e| return e,
};
r.toss(result.len + 1);
return result[0 .. result.len - 1];
r.toss(inclusive.len);
return inclusive[0 .. inclusive.len - 1];
}

/// Returns a slice of the next bytes of buffered data from the stream until
Expand Down Expand Up @@ -1403,6 +1405,9 @@ test peekSentinel {
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekSentinel('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterInclusive {
@@ -1417,22 +1422,52 @@ test peekDelimiterInclusive {
try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterExclusive {
var r: Reader = .fixed("ab\nc");

try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));

try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n'));
try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
}

test peekDelimiterExclusive {
var r: Reader = .fixed("ab\nc");

try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
r.toss(3);
r.toss(2);
try testing.expectEqualStrings("", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));

try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
r.toss(1);
try testing.expectError(error.EndOfStream, r.peekDelimiterExclusive('\n'));
}

test takeDelimiter {
var r: Reader = .fixed("ab\nc\n\nd");
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));

r = .fixed("ab\nc\n\nd\n"); // one trailing newline does not affect behavior
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));
}

test streamDelimiter {
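Taken together, the delimiter changes give takeDelimiter iterator-like semantics: the delimiter is stripped from the returned slice, a trailing unterminated chunk is yielded as a final element, and null means the stream is exhausted. A hedged usage sketch against the post-patch API:

const std = @import("std");

test "line iteration with takeDelimiter (usage sketch)" {
    // Per the tests above, "ab\nc" and "ab\nc\n" both yield "ab", then "c",
    // then null.
    var r: std.Io.Reader = .fixed("ab\nc\n");
    var count: usize = 0;
    while (try r.takeDelimiter('\n')) |line| : (count += 1) {
        _ = line; // e.g. parse one line per iteration
    }
    try std.testing.expectEqual(2, count);
}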
44 changes: 44 additions & 0 deletions lib/std/Io/Reader/Limited.zig
@@ -27,6 +27,7 @@ pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {

fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @fieldParentPtr("interface", r);
if (l.remaining == .nothing) return error.EndOfStream;
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.stream(w, combined_limit);
l.remaining = l.remaining.subtract(n).?;
@@ -51,8 +52,51 @@ test stream {

fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
const l: *Limited = @fieldParentPtr("interface", r);
if (l.remaining == .nothing) return error.EndOfStream;
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.discard(combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}

test "end of stream, read, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;

var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}

test "end of stream, read, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;

var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}

test "end of stream, discard, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;

try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}

test "end of stream, discard, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;

try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}
37 changes: 31 additions & 6 deletions lib/std/Io/Writer.zig
@@ -923,10 +923,12 @@ pub fn sendFileHeader(
return n;
}

/// Asserts nonzero buffer capacity.
/// Asserts nonzero buffer capacity and nonzero `limit`.
pub fn sendFileReading(w: *Writer, file_reader: *File.Reader, limit: Limit) FileReadingError!usize {
assert(limit != .nothing);
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
w.advance(n);
return n;
}
@@ -2778,7 +2780,8 @@ pub const Allocating = struct {
if (additional == 0) return error.EndOfStream;
a.ensureUnusedCapacity(limit.minInt64(additional)) catch return error.WriteFailed;
const dest = limit.slice(a.writer.buffer[a.writer.end..]);
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
a.writer.end += n;
return n;
}
@@ -2849,18 +2852,40 @@ test "allocating sendFile" {

const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
var r_buffer: [256]u8 = undefined;
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeByte('h');
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();

var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);

var allocating: Writer.Allocating = .init(testing.allocator);
defer allocating.deinit();
try allocating.ensureUnusedCapacity(1);
try testing.expectEqual(4, allocating.writer.sendFileAll(&file_reader, .unlimited));
try testing.expectEqualStrings("abcd", allocating.writer.buffered());
}

test sendFileReading {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();

const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();

_ = try file_reader.interface.streamRemaining(&allocating.writer);
var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);

var w_buffer: [1]u8 = undefined;
var discarding: Writer.Discarding = .init(&w_buffer);
try testing.expectEqual(4, discarding.writer.sendFileReadingAll(&file_reader, .unlimited));
}
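Both sendFile fixes follow the same pattern: File.Reader.read is replaced by readSliceShort on the generic interface, and a zero-byte short read is translated into error.EndOfStream so that retrying callers terminate. A simplified sketch of the loop shape this protects (the helper name is hypothetical, not the actual Writer internals):

const std = @import("std");

fn sendAllSketch(w: *std.Io.Writer, file_reader: *std.fs.File.Reader) !u64 {
    var total: u64 = 0;
    while (true) {
        const dest = try w.writableSliceGreedy(1);
        // readSliceShort returns the number of bytes read; 0 means end of
        // stream. Without the n == 0 check, a loop like this would never
        // terminate once the file is exhausted.
        const n = try file_reader.interface.readSliceShort(dest);
        if (n == 0) return total;
        w.advance(n);
        total += n;
    }
}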

test writeStruct {
Expand Down
Loading