Replace most uses of pointer::offset with add and sub #100822

Merged · 1 commit · Aug 21, 2022
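The rewrite is mechanical throughout the diff: where the offset is known to be non-negative, `ptr.offset(n as isize)` becomes `ptr.add(n)`, and `ptr.offset(-(n as isize))` becomes `ptr.sub(n)`, dropping the sign casts at each call site. A minimal standalone sketch of the equivalence (not code from this PR):

```rust
fn main() {
    let xs = [10u8, 20, 30, 40];
    let p = xs.as_ptr();
    unsafe {
        // Old style: offset() takes isize, so unsigned indices need casts.
        assert_eq!(*p.offset(2), 30);
        assert_eq!(*p.offset(3).offset(-1), 30);
        // New style: add()/sub() take usize directly.
        assert_eq!(*p.add(2), 30);
        assert_eq!(*p.add(3).sub(1), 30);
    }
}
```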
compiler/rustc_arena/src/lib.rs (2 changes: 1 addition & 1 deletion)

@@ -217,7 +217,7 @@ impl<T> TypedArena<T> {
} else {
let ptr = self.ptr.get();
// Advance the pointer.
- self.ptr.set(self.ptr.get().offset(1));
+ self.ptr.set(self.ptr.get().add(1));
// Write into uninitialized memory.
ptr::write(ptr, object);
&mut *ptr

compiler/rustc_codegen_cranelift/example/alloc_system.rs (2 changes: 1 addition & 1 deletion)

@@ -94,7 +94,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
- &mut *(ptr as *mut Header).offset(-1)
+ &mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));

compiler/rustc_codegen_gcc/example/alloc_system.rs (2 changes: 1 addition & 1 deletion)

@@ -156,7 +156,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
- &mut *(ptr as *mut Header).offset(-1)
+ &mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));

compiler/rustc_serialize/src/serialize.rs (2 changes: 1 addition & 1 deletion)

@@ -273,7 +273,7 @@ impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
unsafe {
let ptr: *mut T = vec.as_mut_ptr();
for i in 0..len {
- std::ptr::write(ptr.offset(i as isize), Decodable::decode(d));
+ std::ptr::write(ptr.add(i), Decodable::decode(d));
}
vec.set_len(len);
}

library/alloc/src/alloc/tests.rs (2 changes: 1 addition & 1 deletion)

@@ -15,7 +15,7 @@ fn allocate_zeroed() {
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
- i = i.offset(1);
+ i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}

library/alloc/src/collections/vec_deque/mod.rs (4 changes: 2 additions & 2 deletions)

@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
- let src: isize = (right_edge + right_offset) as isize;
- ptr::swap(buf.add(i), buf.offset(src));
+ let src = right_edge + right_offset;
+ ptr::swap(buf.add(i), buf.add(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;

library/alloc/src/slice.rs (6 changes: 3 additions & 3 deletions)

@@ -1024,7 +1024,7 @@
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
- let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
@@ -1038,12 +1038,12 @@

unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
- *ptr = unsafe { ptr.offset(1) };
+ *ptr = unsafe { ptr.add(1) };
old
}

unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.offset(-1) };
+ *ptr = unsafe { ptr.sub(1) };
*ptr
}


library/alloc/src/vec/in_place_collect.rs (2 changes: 1 addition & 1 deletion)

@@ -267,7 +267,7 @@
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
- let dst = dst_buf.offset(i as isize);
+ let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer

library/alloc/src/vec/into_iter.rs (6 changes: 3 additions & 3 deletions)

@@ -160,7 +160,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
- self.ptr = unsafe { self.ptr.offset(1) };
+ self.ptr = unsafe { self.ptr.add(1) };

Some(unsafe { ptr::read(old) })
}
@@ -272,7 +272,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
- self.end = unsafe { self.end.offset(-1) };
+ self.end = unsafe { self.end.sub(1) };

Some(unsafe { ptr::read(self.end) })
}
@@ -288,7 +288,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
} else {
// SAFETY: same as for advance_by()
- self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
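The `advance_back_by` rewrite above is the one non-obvious equivalence in this file: for `step_size <= isize::MAX`, `offset(step_size.wrapping_neg() as isize)` and `sub(step_size)` land on the same address. A quick standalone check (not from the PR):

```rust
fn main() {
    let buf = [0u32; 8];
    let end = unsafe { buf.as_ptr().add(8) }; // one-past-the-end pointer
    for step in 0usize..=8 {
        unsafe {
            // wrapping_neg() on usize is two's-complement negation, so the
            // cast to isize yields -(step) whenever step <= isize::MAX.
            assert_eq!(end.offset(step.wrapping_neg() as isize), end.sub(step));
        }
    }
}
```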

library/alloc/src/vec/mod.rs (6 changes: 3 additions & 3 deletions)

@@ -1393,7 +1393,7 @@ impl<T, A: Allocator> Vec<T, A> {
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
+ ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
@@ -1455,7 +1455,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);

// Shift everything down to fill in that spot.
- ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -2408,7 +2408,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}

library/alloc/src/vec/spec_extend.rs (2 changes: 1 addition & 1 deletion)

@@ -39,7 +39,7 @@
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr, element);
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Since the loop executes user code which can panic we have to bump the pointer
// after each step.
// NB can't overflow since we would have had to alloc the address space

library/alloc/tests/str.rs (10 changes: 5 additions & 5 deletions)

@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
fn test_as_ptr() {
let buf = "hello".as_ptr();
unsafe {
- assert_eq!(*buf.offset(0), b'h');
- assert_eq!(*buf.offset(1), b'e');
- assert_eq!(*buf.offset(2), b'l');
- assert_eq!(*buf.offset(3), b'l');
- assert_eq!(*buf.offset(4), b'o');
+ assert_eq!(*buf.add(0), b'h');
+ assert_eq!(*buf.add(1), b'e');
+ assert_eq!(*buf.add(2), b'l');
+ assert_eq!(*buf.add(3), b'l');
+ assert_eq!(*buf.add(4), b'o');
}
}


library/core/src/slice/mod.rs (2 changes: 1 addition & 1 deletion)

@@ -2921,7 +2921,7 @@ impl<T> [T] {
let prev_ptr_write = ptr.add(next_write - 1);
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
if next_read != next_write {
- let ptr_write = prev_ptr_write.offset(1);
+ let ptr_write = prev_ptr_write.add(1);
mem::swap(&mut *ptr_read, &mut *ptr_write);
}
next_write += 1;

library/core/src/slice/sort.rs (36 changes: 18 additions & 18 deletions)

@@ -326,8 +326,8 @@
unsafe {
// Branchless comparison.
*end_l = i as u8;
- end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
- elem = elem.offset(1);
+ end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+ elem = elem.add(1);
}
}
}
@@ -352,9 +352,9 @@
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
unsafe {
// Branchless comparison.
- elem = elem.offset(-1);
+ elem = elem.sub(1);
*end_r = i as u8;
- end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+ end_r = end_r.add(is_less(&*elem, pivot) as usize);
}
}
}
@@ -365,12 +365,12 @@
if count > 0 {
macro_rules! left {
() => {
- l.offset(*start_l as isize)
+ l.add(*start_l as usize)
};
}
macro_rules! right {
() => {
- r.offset(-(*start_r as isize) - 1)
+ r.sub((*start_r as usize) + 1)
};
}
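The `right!()` rewrite is the trickiest sign manipulation in this file: `-(x as isize) - 1` becomes `sub(x + 1)` with the negation distributed over both terms. A standalone spot-check of the equivalence (not from the PR):

```rust
fn main() {
    let buf = [0u8; 16];
    // `r` plays the role of the one-past-the-end block pointer in sort.rs.
    let r = unsafe { buf.as_ptr().add(16) };
    for x in 0u8..16 {
        unsafe {
            // -(x) - 1 == -(x + 1), so both forms address the same element.
            assert_eq!(r.offset(-(x as isize) - 1), r.sub(x as usize + 1));
        }
    }
}
```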

@@ -398,16 +398,16 @@
ptr::copy_nonoverlapping(right!(), left!(), 1);

for _ in 1..count {
- start_l = start_l.offset(1);
+ start_l = start_l.add(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
- start_r = start_r.offset(1);
+ start_r = start_r.add(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}

ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
- start_l = start_l.offset(1);
- start_r = start_r.offset(1);
+ start_l = start_l.add(1);
+ start_r = start_r.add(1);
}
}

@@ -420,15 +420,15 @@
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
// for the smaller number of remaining elements.
- l = unsafe { l.offset(block_l as isize) };
+ l = unsafe { l.add(block_l) };
}

if start_r == end_r {
// All out-of-order elements in the right block were moved. Move to the previous block.

// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
// or `block_r` has been adjusted for the last handful of elements.
- r = unsafe { r.offset(-(block_r as isize)) };
+ r = unsafe { r.sub(block_r) };
}

if is_done {
@@ -457,9 +457,9 @@
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
// the last block, so the `l.offset` calls are valid.
unsafe {
- end_l = end_l.offset(-1);
- ptr::swap(l.offset(*end_l as isize), r.offset(-1));
- r = r.offset(-1);
+ end_l = end_l.sub(1);
+ ptr::swap(l.add(*end_l as usize), r.sub(1));
+ r = r.sub(1);

Member: Not this PR, but consider a follow-up to use usize::from(*end_l) here -- I went off to check that *end_l wasn't signed, and with a from it would be obvious that it's unsigned (since from wouldn't compile for a signed-to-unsigned conversion). (A small illustration follows this hunk.)
}
}
width(v.as_mut_ptr(), r)
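On the `usize::from` suggestion above: `From` impls exist only for conversions that are lossless on every platform, so they document unsignedness where `as` stays silent. A small illustration (my sketch, not PR code):

```rust
fn main() {
    let b: u8 = 200;
    let i = usize::from(b); // compiles: u8 -> usize is always lossless
    assert_eq!(i, 200);

    let s: i8 = -1;
    let j = s as usize; // compiles, but silently wraps to usize::MAX
    assert_eq!(j, usize::MAX);
    // let k = usize::from(s); // would NOT compile: no From<i8> for usize
}
```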
@@ -470,9 +470,9 @@
while start_r < end_r {
// SAFETY: See the reasoning in [remaining-elements-safety].
unsafe {
- end_r = end_r.offset(-1);
- ptr::swap(l, r.offset(-(*end_r as isize) - 1));
- l = l.offset(1);
+ end_r = end_r.sub(1);
+ ptr::swap(l, r.sub((*end_r as usize) + 1));
+ l = l.add(1);
}
}
width(v.as_mut_ptr(), l)

library/core/src/str/validations.rs (4 changes: 2 additions & 2 deletions)

@@ -216,12 +216,12 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `usize_bytes`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
- // both `block` and `block.offset(1)`.
+ // both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
- let zv = contains_nonascii(*block.offset(1));
+ let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}

library/panic_abort/src/android.rs (2 changes: 1 addition & 1 deletion)

@@ -42,7 +42,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
return; // allocation failure
}
copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
- buf.offset(msg.len() as isize).write(0);
+ buf.add(msg.len()).write(0);

let func = transmute::<usize, SetAbortMessageType>(func_addr);
func(buf);

library/panic_unwind/src/dwarf/eh.rs (2 changes: 1 addition & 1 deletion)

@@ -75,7 +75,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result

let call_site_encoding = reader.read::<u8>();
let call_site_table_length = reader.read_uleb128();
- let action_table = reader.ptr.offset(call_site_table_length as isize);
+ let action_table = reader.ptr.add(call_site_table_length as usize);

Member: Not this PR's problem, but it's interesting that this reads a u64 and then offsets by it without any checking. (An illustrative checked variant follows this hunk.)
let ip = context.ip;

if !USING_SJLJ_EXCEPTIONS {
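On the review comment above: nothing in this hunk validates `call_site_table_length` before offsetting by it. One hypothetical shape such a check could take; the name and signature are mine, not the panic runtime's:

```rust
/// Offset `base` by an untrusted `len`, refusing lengths that overflow
/// usize or run past `limit`. Purely illustrative.
fn offset_checked(base: *const u8, len: u64, limit: *const u8) -> Option<*const u8> {
    let len = usize::try_from(len).ok()?; // reject lengths > usize::MAX
    let addr = (base as usize).checked_add(len)?; // reject address overflow
    (addr <= limit as usize).then(|| addr as *const u8)
}
```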

library/std/src/os/unix/net/addr.rs (2 changes: 1 addition & 1 deletion)

@@ -329,7 +329,7 @@ impl SocketAddr {

crate::ptr::copy_nonoverlapping(
namespace.as_ptr(),
- addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
+ addr.sun_path.as_mut_ptr().add(1) as *mut u8,
namespace.len(),
);
let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;

library/std/src/sys/sgx/abi/usercalls/tests.rs (4 changes: 2 additions & 2 deletions)

@@ -17,12 +17,12 @@ fn test_copy_function() {
dst.copy_from_enclave(&[0u8; 100]);

// Copy src[0..size] to dst + offset
- unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+ unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };

// Verify copy
for byte in 0..size {
unsafe {
- assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+ assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
Member Author: Sneaky: `offset` was {integer}, previously inferred to isize, now to usize.

Member: Looks like `byte` was previously i32 (because it was unconstrained, as it was always `as`-cast) and is now usize? So it can probably be `src[byte]` too.

(It would be nice to have a warning for T-to-T `as` casts...)

Member Author: I was thinking about linting T2T casts too while doing these refactorings. It should probably be easy to add 🤔

Member Author: Hm, there is a clippy lint for that (docs, source); should I open an uplifting issue/PR?

Member: I guess the problem is if you want to keep it -- say it's there to be resilient to c_int widths or something, where it's `c_int as i64`, which might be a no-op sometimes.

For a clippy lint, saying "hey, just allow it" is fine, but for rustc lints we generally want to be able to give an "or write ______ instead to make it obvious why you want to keep it like this" suggestion.

Nothing jumped to mind for how to do that, but hopefully you'll come up with something clever to lint only in the valuable places.

Member Author: Not linting `i64 as c_int` is probably possible by recording in HIR that `c_int` is a type alias, but not linting `c_int as i64` is, I believe, impossible: there is simply no way to differentiate a variable of type `c_int` from a variable of type `i64` (if `c_int` is an alias for `i64`).

Member Author: As a second thought, would this still be useful as an allow-by-default lint? 🤔

(A standalone demo of the inference change follows this hunk.)

}
}
}
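As flagged in the thread above, the silent part of this rewrite is type inference: an unconstrained integer takes its type from the pointer method it feeds, so switching `offset` to `add` retypes it from isize to usize. A standalone demo (not PR code):

```rust
fn main() {
    let xs = [1u8, 2, 3];
    unsafe {
        let i = 1; // {integer}: the concrete type is picked by the use below
        let _ = xs.as_ptr().offset(i); // forces i: isize
        // let _ = xs[i]; // would NOT compile: indexing wants usize

        let j = 1; // {integer} again
        let _ = xs.as_ptr().add(j); // forces j: usize
        let _ = xs[j]; // fine: j is already usize
    }
}
```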

library/std/src/sys/windows/alloc.rs (4 changes: 2 additions & 2 deletions)

@@ -168,7 +168,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
// SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
// is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
// it, it is safe to write a header directly before it.
- unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+ unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };

// SAFETY: The returned pointer does not point to the start of an allocated block,
// but there is a header readable directly before it containing the location of the start
@@ -213,7 +213,7 @@ unsafe impl GlobalAlloc for System {

// SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
// and have a header readable directly before it.
- unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+ unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
}
};


library/std/src/sys/windows/fs.rs (6 changes: 3 additions & 3 deletions)

@@ -512,7 +512,7 @@ impl File {
));
}
};
- let subst_ptr = path_buffer.offset(subst_off as isize);
+ let subst_ptr = path_buffer.add(subst_off.into());
let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
// Absolute paths start with an NT internal namespace prefix `\??\`
// We should not let it leak through.
@@ -1345,10 +1345,10 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
let v = br"\??\";
let v = v.iter().map(|x| *x as u16);
for c in v.chain(original.as_os_str().encode_wide()) {
- *buf.offset(i) = c;
+ *buf.add(i) = c;
i += 1;
}
- *buf.offset(i) = 0;
+ *buf.add(i) = 0;
i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;

library/std/src/sys/windows/os.rs (6 changes: 3 additions & 3 deletions)

@@ -99,11 +99,11 @@ impl Iterator for Env {
}
let p = self.cur as *const u16;
let mut len = 0;
- while *p.offset(len) != 0 {
+ while *p.add(len) != 0 {
len += 1;
}
- let s = slice::from_raw_parts(p, len as usize);
- self.cur = self.cur.offset(len + 1);
+ let s = slice::from_raw_parts(p, len);
+ self.cur = self.cur.add(len + 1);

// Windows allows environment variables to start with an equals
// symbol (in any other position, this is the separator between
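The pattern in `Env::next` above benefits twice from the change: `len` is now usize, so it feeds both `p.add(len)` and `slice::from_raw_parts` without casts. A standalone sketch of the same NUL-terminated UTF-16 scan (my construction, not the real environment block):

```rust
fn main() {
    // A fake environment entry: "PATH" followed by a NUL terminator.
    let buf: [u16; 5] = [b'P' as u16, b'A' as u16, b'T' as u16, b'H' as u16, 0];
    let p = buf.as_ptr();
    let mut len = 0; // usize: serves as pointer offset and slice length alike
    unsafe {
        while *p.add(len) != 0 {
            len += 1;
        }
        let s = std::slice::from_raw_parts(p, len);
        assert_eq!(String::from_utf16_lossy(s), "PATH");
    }
}
```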