
Merge pull request #15278 from ziglang/memcpy-memset
change semantics of `@memcpy` and `@memset`
andrewrk committed Apr 26, 2023
2 parents d0311e2 + badad16 commit 3c66850
Showing 57 changed files with 1,270 additions and 516 deletions.
52 changes: 21 additions & 31 deletions doc/langref.html.in
@@ -8681,40 +8681,30 @@ test "integer cast panic" {
{#header_close#}

{#header_open|@memcpy#}
<pre>{#syntax#}@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize) void{#endsyntax#}</pre>
<p>
This function copies bytes from one region of memory to another. {#syntax#}dest{#endsyntax#} and
{#syntax#}source{#endsyntax#} are both pointers and must not overlap.
</p>
<p>
This function is a low level intrinsic with no safety mechanisms. Most code
should not use this function, instead using something like this:
</p>
<pre>{#syntax#}for (dest, source[0..byte_count]) |*d, s| d.* = s;{#endsyntax#}</pre>
<p>
The optimizer is intelligent enough to turn the above snippet into a memcpy.
</p>
<p>There is also a standard library function for this:</p>
<pre>{#syntax#}const mem = @import("std").mem;
mem.copy(u8, dest[0..byte_count], source[0..byte_count]);{#endsyntax#}</pre>
<pre>{#syntax#}@memcpy(noalias dest, noalias source) void{#endsyntax#}</pre>
<p>This function copies bytes from one region of memory to another.</p>
<p>{#syntax#}dest{#endsyntax#} must be a mutable slice, a mutable pointer to an array, or
a mutable many-item {#link|pointer|Pointers#}. It may have any
alignment, and it may have any element type.</p>
<p>Likewise, {#syntax#}source{#endsyntax#} must be a mutable slice, a
mutable pointer to an array, or a mutable many-item
{#link|pointer|Pointers#}. It may have any alignment, and it may have any
element type.</p>
<p>The {#syntax#}source{#endsyntax#} element type must support {#link|Type Coercion#}
into the {#syntax#}dest{#endsyntax#} element type. The element types may have
different ABI sizes; however, that may incur a performance penalty.</p>
<p>Similar to {#link|for#} loops, at least one of {#syntax#}source{#endsyntax#} and
{#syntax#}dest{#endsyntax#} must provide a length, and if two lengths are provided,
they must be equal.</p>
<p>Finally, the two memory regions must not overlap.</p>
{#header_close#}
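
As a quick illustration of the new call shape (a minimal sketch, not part of the commit; the names are invented):

    test "@memcpy under the new semantics" {
        var dest: [5]u8 = undefined;
        const source: *const [5]u8 = "hello";
        // both operands provide a length (5), so the lengths must match
        @memcpy(&dest, source);

        var buf: [16]u8 = undefined;
        const src: [*]const u8 = "abcd";
        // a many-item pointer carries no length; the dest slice provides it
        @memcpy(buf[0..4], src);
    }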

{#header_open|@memset#}
<pre>{#syntax#}@memset(dest: [*]u8, c: u8, byte_count: usize) void{#endsyntax#}</pre>
<p>
This function sets a region of memory to {#syntax#}c{#endsyntax#}. {#syntax#}dest{#endsyntax#} is a pointer.
</p>
<p>
This function is a low level intrinsic with no safety mechanisms. Most
code should not use this function, instead using something like this:
</p>
<pre>{#syntax#}for (dest[0..byte_count]) |*b| b.* = c;{#endsyntax#}</pre>
<p>
The optimizer is intelligent enough to turn the above snippet into a memset.
</p>
<p>There is also a standard library function for this:</p>
<pre>{#syntax#}const mem = @import("std").mem;
mem.set(u8, dest, c);{#endsyntax#}</pre>
<pre>{#syntax#}@memset(dest, elem) void{#endsyntax#}</pre>
<p>This function sets all the elements of a memory region to {#syntax#}elem{#endsyntax#}.</p>
<p>{#syntax#}dest{#endsyntax#} must be a mutable slice or a mutable pointer to an array.
It may have any alignment, and it may have any element type.</p>
<p>{#syntax#}elem{#endsyntax#} is coerced to the element type of {#syntax#}dest{#endsyntax#}.</p>
{#header_close#}
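
Likewise, a hedged sketch of the new @memset call shape (not part of the commit):

    test "@memset under the new semantics" {
        var counters: [8]u32 = undefined;
        // dest is a pointer to an array; 0 is coerced to the element type u32
        @memset(&counters, 0);

        var name_buf: [32]u8 = undefined;
        // dest may also be a slice
        @memset(name_buf[0..16], ' ');
    }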

{#header_open|@min#}
12 changes: 6 additions & 6 deletions lib/compiler_rt/atomics.zig
@@ -121,22 +121,22 @@ fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) vo
_ = model;
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
@memcpy(dest, src, size);
@memcpy(dest[0..size], src);
}

fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(dest));
defer sl.release();
@memcpy(dest, src, size);
@memcpy(dest[0..size], src);
}

fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@memcpy(old, ptr, size);
@memcpy(ptr, val, size);
@memcpy(old[0..size], ptr);
@memcpy(ptr[0..size], val);
}

fn __atomic_compare_exchange(
@@ -155,10 +155,10 @@ fn __atomic_compare_exchange(
if (expected[i] != b) break;
} else {
// The two objects, ptr and expected, are equal
@memcpy(ptr, desired, size);
@memcpy(ptr[0..size], desired);
return 1;
}
@memcpy(expected, ptr, size);
@memcpy(expected[0..size], ptr);
return 0;
}

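The edits in this file show the mechanical migration applied throughout the commit: the old (pointer, pointer, length) form becomes a slice on whichever operand carries the length. A sketch of the general before/after pattern (function names invented):

    fn copyBytes(dest: [*]u8, src: [*]const u8, n: usize) void {
        // before this commit: @memcpy(dest, src, n);
        @memcpy(dest[0..n], src);
    }

    fn zeroBytes(dest: [*]u8, n: usize) void {
        // before this commit: @memset(dest, 0, n);
        @memset(dest[0..n], 0);
    }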
4 changes: 2 additions & 2 deletions lib/compiler_rt/emutls.zig
@@ -139,10 +139,10 @@ const ObjectArray = struct {

if (control.default_value) |value| {
// default value: copy the content to newly allocated object.
@memcpy(data, @ptrCast([*]const u8, value), size);
@memcpy(data[0..size], @ptrCast([*]const u8, value));
} else {
// no default: return zeroed memory.
@memset(data, 0, size);
@memset(data[0..size], 0);
}

self.slots[index] = @ptrCast(*anyopaque, data);
4 changes: 2 additions & 2 deletions lib/std/array_hash_map.zig
@@ -1893,7 +1893,7 @@ const IndexHeader = struct {
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
@memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
@memset(bytes[@sizeOf(IndexHeader)..], 0xff);
const result = @ptrCast(*IndexHeader, bytes.ptr);
result.* = .{
.bit_index = new_bit_index,
@@ -1914,7 +1914,7 @@ const IndexHeader = struct {
const index_size = hash_map.capacityIndexSize(header.bit_index);
const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
@memset(ptr + @sizeOf(IndexHeader), 0xff, nbytes - @sizeOf(IndexHeader));
@memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
}

// Verify that the header has sufficient alignment to produce aligned arrays.
16 changes: 4 additions & 12 deletions lib/std/array_list.zig
@@ -121,7 +121,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {

const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
mem.copy(T, new_memory, self.items);
@memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
@memset(self.items, undefined);
self.clearAndFree();
return new_memory;
}
@@ -281,11 +281,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(
@ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
@ptrCast([*]const u8, items.ptr),
items.len * @sizeOf(T),
);
@memcpy(self.items[old_len..][0..items.len], items);
}

pub const Writer = if (T != u8)
@@ -601,7 +597,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
mem.copy(T, new_memory, self.items);
@memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
@memset(self.items, undefined);
self.clearAndFree(allocator);
return new_memory;
}
@@ -740,11 +736,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(
@ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
@ptrCast([*]const u8, items.ptr),
items.len * @sizeOf(T),
);
@memcpy(self.items[old_len..][0..items.len], items);
}

pub const WriterContext = struct {
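The replacement expression self.items[old_len..][0..items.len] uses a common Zig idiom: the first slice re-bases the start, the second pins the exact length, so both @memcpy operands agree. A standalone sketch of the idiom (names invented):

    test "re-based, length-pinned destination slice" {
        var buf = [_]u8{0} ** 8;
        const old_len: usize = 2;
        const items = "abc";
        // same region as buf[old_len .. old_len + items.len]
        @memcpy(buf[old_len..][0..items.len], items);
        try @import("std").testing.expectEqualSlices(u8, "abc", buf[2..5]);
    }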
2 changes: 2 additions & 0 deletions lib/std/builtin.zig
@@ -1002,6 +1002,8 @@ pub const panic_messages = struct {
pub const index_out_of_bounds = "index out of bounds";
pub const start_index_greater_than_end = "start index is larger than end index";
pub const for_len_mismatch = "for loop over objects with non-equal lengths";
pub const memcpy_len_mismatch = "@memcpy arguments have non-equal lengths";
pub const memcpy_alias = "@memcpy arguments alias";
};

pub noinline fn returnError(st: *StackTrace) void {
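The two new messages back runtime safety checks: in safe build modes, @memcpy now panics when the operand lengths differ or the regions alias, rather than silently invoking undefined behavior. A sketch of what the first check guards against (illustrative only, not from the commit):

    fn safeCopy() void {
        var dest: [4]u8 = undefined;
        const source: []const u8 = "abc";
        // would panic with "@memcpy arguments have non-equal lengths":
        // @memcpy(&dest, source);
        @memcpy(dest[0..source.len], source); // lengths agree: ok
    }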
2 changes: 1 addition & 1 deletion lib/std/c/darwin.zig
@@ -3670,7 +3670,7 @@ pub const MachTask = extern struct {
else => |err| return unexpectedKernError(err),
}

@memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
@memcpy(out_buf[0..curr_bytes_read], @intToPtr([*]const u8, vm_memory));
_ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);

out_buf = out_buf[curr_bytes_read..];
4 changes: 2 additions & 2 deletions lib/std/crypto/aegis.zig
@@ -209,7 +209,7 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@memset(m.ptr, undefined, m.len);
@memset(m, undefined);
return error.AuthenticationFailed;
}
}
@@ -390,7 +390,7 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@memset(m.ptr, undefined, m.len);
@memset(m, undefined);
return error.AuthenticationFailed;
}
}
2 changes: 1 addition & 1 deletion lib/std/crypto/aes_gcm.zig
@@ -91,7 +91,7 @@ fn AesGcm(comptime Aes: anytype) type {
acc |= (computed_tag[p] ^ tag[p]);
}
if (acc != 0) {
@memset(m.ptr, undefined, m.len);
@memset(m, undefined);
return error.AuthenticationFailed;
}

2 changes: 1 addition & 1 deletion lib/std/crypto/tls/Client.zig
@@ -531,7 +531,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
const pub_key = subject.pubKey();
if (pub_key.len > main_cert_pub_key_buf.len)
return error.CertificatePublicKeyInvalid;
@memcpy(&main_cert_pub_key_buf, pub_key.ptr, pub_key.len);
@memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key);
main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
} else {
try prev_cert.verify(subject, now_sec);
6 changes: 3 additions & 3 deletions lib/std/crypto/utils.zig
@@ -135,11 +135,11 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
/// Sets a slice to zeroes.
/// Prevents the store from being optimized out.
pub fn secureZero(comptime T: type, s: []T) void {
// NOTE: We do not use a volatile slice cast here since LLVM cannot
// see that it can be replaced by a memset.
// TODO: implement `@memset` for non-byte-sized element type in the llvm backend
//@memset(@as([]volatile T, s), 0);
const ptr = @ptrCast([*]volatile u8, s.ptr);
const length = s.len * @sizeOf(T);
@memset(ptr, 0, length);
@memset(ptr[0..length], 0);
}

test "crypto.utils.timingSafeEql" {
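secureZero keeps the hand-rolled byte-pointer cast because, per the NOTE and TODO above, the LLVM backend could not yet lower @memset for volatile or non-byte-sized element types. Call-site usage is unchanged; a quick sketch:

    test "secureZero wipes a key" {
        const utils = @import("std").crypto.utils;
        var key = [_]u8{0xaa} ** 32;
        utils.secureZero(u8, &key);
        // every byte is zeroed, and the stores cannot be optimized out
        try @import("std").testing.expectEqual(@as(u8, 0), key[0]);
    }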
8 changes: 4 additions & 4 deletions lib/std/fifo.zig
@@ -104,7 +104,7 @@ pub fn LinearFifo(
}
{ // set unused area to undefined
const unused = mem.sliceAsBytes(self.buf[self.count..]);
@memset(unused.ptr, undefined, unused.len);
@memset(unused, undefined);
}
}

@@ -182,12 +182,12 @@ pub fn LinearFifo(
const slice = self.readableSliceMut(0);
if (slice.len >= count) {
const unused = mem.sliceAsBytes(slice[0..count]);
@memset(unused.ptr, undefined, unused.len);
@memset(unused, undefined);
} else {
const unused = mem.sliceAsBytes(slice[0..]);
@memset(unused.ptr, undefined, unused.len);
@memset(unused, undefined);
const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
@memset(unused2.ptr, undefined, unused2.len);
@memset(unused2, undefined);
}
}
if (autoalign and self.count == count) {
Expand Down
22 changes: 8 additions & 14 deletions lib/std/hash/murmur.zig
@@ -99,9 +99,8 @@ pub const Murmur2_64 = struct {

pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
const m: u64 = 0xc6a4a7935bd1e995;
const len = @as(u64, str.len);
var h1: u64 = seed ^ (len *% m);
for (@ptrCast([*]align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| {
var h1: u64 = seed ^ (@as(u64, str.len) *% m);
for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| {
var k1: u64 = v;
if (native_endian == .Big)
k1 = @byteSwap(k1);
Expand All @@ -111,11 +110,11 @@ pub const Murmur2_64 = struct {
h1 ^= k1;
h1 *%= m;
}
const rest = len & 7;
const offset = len - rest;
const rest = str.len & 7;
const offset = str.len - rest;
if (rest > 0) {
var k1: u64 = 0;
@memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest));
@memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]);
if (native_endian == .Big)
k1 = @byteSwap(k1);
h1 ^= k1;
@@ -282,13 +281,8 @@ pub const Murmur3_32 = struct {

fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
const hashbytes = hashbits / 8;
var key: [256]u8 = undefined;
var hashes: [hashbytes * 256]u8 = undefined;
var final: [hashbytes]u8 = undefined;

@memset(@ptrCast([*]u8, &key[0]), 0, @sizeOf(@TypeOf(key)));
@memset(@ptrCast([*]u8, &hashes[0]), 0, @sizeOf(@TypeOf(hashes)));
@memset(@ptrCast([*]u8, &final[0]), 0, @sizeOf(@TypeOf(final)));
var key: [256]u8 = [1]u8{0} ** 256;
var hashes: [hashbytes * 256]u8 = [1]u8{0} ** (hashbytes * 256);

var i: u32 = 0;
while (i < 256) : (i += 1) {
@@ -297,7 +291,7 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
var h = hash_fn(key[0..i], 256 - i);
if (native_endian == .Big)
h = @byteSwap(h);
@memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes);
@memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h));
}

return @truncate(u32, hash_fn(&hashes, 0));
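The tail handling above is the slice migration in miniature: the final rest bytes of the input are copied into the low bytes of a zeroed integer. A standalone sketch, using the same pre-0.11 two-argument @ptrCast style as the rest of this diff:

    test "partial tail into an integer" {
        const str = "abcdefghij"; // one full u64 word plus a 2-byte tail
        const rest = str.len & 7; // 2
        const offset = str.len - rest; // 8
        var k1: u64 = 0;
        @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]);
        // the first two bytes of k1's storage now hold the tail
        const bytes = @ptrCast([*]const u8, &k1)[0..8];
        try @import("std").testing.expectEqualSlices(u8, "ij", bytes[0..2]);
    }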
2 changes: 1 addition & 1 deletion lib/std/hash_map.zig
@@ -1449,7 +1449,7 @@ pub fn HashMapUnmanaged(
}

fn initMetadatas(self: *Self) void {
@memset(@ptrCast([*]u8, self.metadata.?), 0, @sizeOf(Metadata) * self.capacity());
@memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0);
}

// This counts the number of occupied slots (not counting tombstones), which is
6 changes: 3 additions & 3 deletions lib/std/heap/general_purpose_allocator.zig
@@ -759,7 +759,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
if (new_size_class <= size_class) {
if (old_mem.len > new_size) {
@memset(old_mem.ptr + new_size, undefined, old_mem.len - new_size);
@memset(old_mem[new_size..], undefined);
}
if (config.verbose_log) {
log.info("small resize {d} bytes at {*} to {d}", .{
@@ -911,7 +911,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.empty_buckets = bucket;
}
} else {
@memset(old_mem.ptr, undefined, old_mem.len);
@memset(old_mem, undefined);
}
if (config.safety) {
assert(self.small_allocations.remove(@ptrToInt(old_mem.ptr)));
@@ -1011,7 +1011,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
};
self.buckets[bucket_index] = ptr;
// Set the used bits to all zeroes
@memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
@memset(@as([*]u8, @as(*[1]u8, ptr.usedBits(0)))[0..usedBitsCount(size_class)], 0);
return ptr;
}
};
8 changes: 4 additions & 4 deletions lib/std/math/big/int_test.zig
@@ -2756,7 +2756,7 @@ test "big int conversion read twos complement with padding" {

var buffer1 = try testing.allocator.alloc(u8, 16);
defer testing.allocator.free(buffer1);
@memset(buffer1.ptr, 0xaa, buffer1.len);
@memset(buffer1, 0xaa);

// writeTwosComplement:
// (1) should not write beyond buffer[0..abi_size]
@@ -2773,7 +2773,7 @@
a.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));

@memset(buffer1.ptr, 0xaa, buffer1.len);
@memset(buffer1, 0xaa);
try a.set(-0x01_02030405_06070809_0a0b0c0d);
bit_count = 12 * 8 + 2;

@@ -2794,7 +2794,7 @@ test "big int write twos complement +/- zero" {

var buffer1 = try testing.allocator.alloc(u8, 16);
defer testing.allocator.free(buffer1);
@memset(buffer1.ptr, 0xaa, buffer1.len);
@memset(buffer1, 0xaa);

// Test zero

Expand All @@ -2807,7 +2807,7 @@ test "big int write twos complement +/- zero" {
m.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));

@memset(buffer1.ptr, 0xaa, buffer1.len);
@memset(buffer1, 0xaa);
m.positive = false;

// Test negative zero