[libc][NFC] Move aligned access implementations to separate header
gchatelet committed Jul 9, 2023
1 parent 9259f41 commit bfd9488
Showing 7 changed files with 211 additions and 185 deletions.
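The move also deduplicates the per-alignment load dispatch: the if/else chains that bcmp and memcmp each carried are folded into load32_aligned/load64_aligned overloads in the new header, so call sites shrink to one line. A sketch of the pattern, abridged from the hunks below:

  // Before: each *_implementations.h dispatched on the measured misalignment.
  uint32_t b;
  if (p2_alignment == 0)
    b = load32_aligned<uint32_t>(p2, offset);
  else if (p2_alignment == 2)
    b = load32_aligned<uint16_t, uint16_t>(p2, offset);
  else
    b = load32_aligned<uint8_t, uint16_t, uint8_t>(p2, offset);

  // After: the dispatch lives once in generic/aligned_access.h.
  uint32_t b = load32_aligned(p2, offset, p2_alignment);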
1 change: 1 addition & 0 deletions libc/src/string/memory_utils/CMakeLists.txt
@@ -6,6 +6,7 @@ add_header_library(
aarch64/memcpy_implementations.h
bcmp_implementations.h
bzero_implementations.h
generic/aligned_access.h
generic/byte_per_byte.h
memcmp_implementations.h
memcpy_implementations.h
54 changes: 1 addition & 53 deletions libc/src/string/memory_utils/bcmp_implementations.h
@@ -12,6 +12,7 @@
#include "src/__support/common.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY LIBC_LOOP_NOUNROLL
#include "src/__support/macros/properties/architectures.h"
#include "src/string/memory_utils/generic/aligned_access.h"
#include "src/string/memory_utils/generic/byte_per_byte.h"
#include "src/string/memory_utils/op_aarch64.h"
#include "src/string/memory_utils/op_builtin.h"
@@ -23,59 +24,6 @@

namespace __llvm_libc {

[[maybe_unused]] LIBC_INLINE BcmpReturnType
inline_bcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_bcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_bcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint64_t a;
if (p2_alignment == 0)
a = load64_aligned<uint64_t>(p2, offset);
else if (p2_alignment == 4)
a = load64_aligned<uint32_t, uint32_t>(p2, offset);
else if (p2_alignment == 2)
a = load64_aligned<uint16_t, uint16_t, uint16_t, uint16_t>(p2, offset);
else
a = load64_aligned<uint8_t, uint16_t, uint16_t, uint16_t, uint8_t>(
p2, offset);
uint64_t b = load64_aligned<uint64_t>(p1, offset);
if (a != b)
return BcmpReturnType::NONZERO();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}

[[maybe_unused]] LIBC_INLINE BcmpReturnType
inline_bcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_bcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_bcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint32_t a;
if (p2_alignment == 0)
a = load32_aligned<uint32_t>(p2, offset);
else if (p2_alignment == 2)
a = load32_aligned<uint16_t, uint16_t>(p2, offset);
else
a = load32_aligned<uint8_t, uint16_t, uint8_t>(p2, offset);
uint32_t b = load32_aligned<uint32_t>(p1, offset);
if (a != b)
return BcmpReturnType::NONZERO();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}

#if defined(LIBC_TARGET_ARCH_IS_X86) || defined(LIBC_TARGET_ARCH_IS_AARCH64)
[[maybe_unused]] LIBC_INLINE BcmpReturnType
inline_bcmp_generic_gt16(CPtr p1, CPtr p2, size_t count) {
205 changes: 205 additions & 0 deletions libc/src/string/memory_utils/generic/aligned_access.h
@@ -0,0 +1,205 @@
//===-- Implementations for platforms with mandatory aligned memory access ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// For some platforms, unaligned loads and stores are either illegal or very
// slow. The implementations in this file make sure all loads and stores are
// always aligned.
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_GENERIC_ALIGNED_ACCESS_H
#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_GENERIC_ALIGNED_ACCESS_H

#include "src/__support/macros/config.h" // LIBC_INLINE
#include "src/string/memory_utils/generic/byte_per_byte.h"
#include "src/string/memory_utils/op_generic.h" // generic::splat
#include "src/string/memory_utils/utils.h" // Ptr, CPtr

#include <stddef.h> // size_t

namespace __llvm_libc {

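// Runtime-dispatch helpers: `alignment` is the misalignment of `ptr + offset`
// modulo the word size, as computed by distance_to_align_down (0 means the
// address is fully aligned). Each helper assembles the word from the widest
// loads that are still naturally aligned at that misalignment.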
[[maybe_unused]] LIBC_INLINE uint32_t load32_aligned(CPtr ptr, size_t offset,
size_t alignment) {
if (alignment == 0)
return load32_aligned<uint32_t>(ptr, offset);
else if (alignment == 2)
return load32_aligned<uint16_t, uint16_t>(ptr, offset);
else
return load32_aligned<uint8_t, uint16_t, uint8_t>(ptr, offset);
}

[[maybe_unused]] LIBC_INLINE uint64_t load64_aligned(CPtr ptr, size_t offset,
size_t alignment) {
if (alignment == 0)
return load64_aligned<uint64_t>(ptr, offset);
else if (alignment == 4)
return load64_aligned<uint32_t, uint32_t>(ptr, offset);
else if (alignment == 2)
return load64_aligned<uint16_t, uint16_t, uint16_t, uint16_t>(ptr, offset);
else
return load64_aligned<uint8_t, uint16_t, uint16_t, uint16_t, uint8_t>(
ptr, offset);
}
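
// A typical caller measures the misalignment once, before its copy/compare
// loop, and reuses it at every iteration since `offset` then advances in
// word-sized steps:
//   size_t src_alignment = distance_to_align_down<kAlign>(src + offset);
//   uint32_t value = load32_aligned(src, offset, src_alignment);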

///////////////////////////////////////////////////////////////////////////////
// memcpy
///////////////////////////////////////////////////////////////////////////////

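// Strategy: copy byte per byte until `dst` is word-aligned, stream whole
// words (aligned stores into `dst`, loads through the dispatch above), then
// finish the tail byte per byte. The early return guarantees
// `count > 2 * kAlign`, so `count - kAlign` below cannot underflow.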
[[maybe_unused]] LIBC_INLINE void
inline_memcpy_aligned_access_32bit(Ptr __restrict dst, CPtr __restrict src,
size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_memcpy_byte_per_byte(dst, src, count);
size_t bytes_to_dst_align = distance_to_align_up<kAlign>(dst);
inline_memcpy_byte_per_byte(dst, src, bytes_to_dst_align);
size_t offset = bytes_to_dst_align;
size_t src_alignment = distance_to_align_down<kAlign>(src + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint32_t value = load32_aligned(src, offset, src_alignment);
store32_aligned<uint32_t>(value, dst, offset);
}
// remainder
inline_memcpy_byte_per_byte(dst, src, count, offset);
}

[[maybe_unused]] LIBC_INLINE void
inline_memcpy_aligned_access_64bit(Ptr __restrict dst, CPtr __restrict src,
size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_memcpy_byte_per_byte(dst, src, count);
size_t bytes_to_dst_align = distance_to_align_up<kAlign>(dst);
inline_memcpy_byte_per_byte(dst, src, bytes_to_dst_align);
size_t offset = bytes_to_dst_align;
size_t src_alignment = distance_to_align_down<kAlign>(src + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint64_t value = load64_aligned(src, offset, src_alignment);
store64_aligned<uint64_t>(value, dst, offset);
}
// remainder
inline_memcpy_byte_per_byte(dst, src, count, offset);
}

///////////////////////////////////////////////////////////////////////////////
// memset
///////////////////////////////////////////////////////////////////////////////

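// Only the destination needs aligning here (there is no source pointer);
// generic::splat widens the byte value so every aligned store writes a full
// word.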
[[maybe_unused]] LIBC_INLINE static void
inline_memset_aligned_access_32bit(Ptr dst, uint8_t value, size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_memset_byte_per_byte(dst, value, count);
size_t bytes_to_dst_align = distance_to_align_up<kAlign>(dst);
inline_memset_byte_per_byte(dst, value, bytes_to_dst_align);
size_t offset = bytes_to_dst_align;
for (; offset < count - kAlign; offset += kAlign)
store32_aligned<uint32_t>(generic::splat<uint32_t>(value), dst, offset);
inline_memset_byte_per_byte(dst, value, count, offset);
}

[[maybe_unused]] LIBC_INLINE static void
inline_memset_aligned_access_64bit(Ptr dst, uint8_t value, size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_memset_byte_per_byte(dst, value, count);
size_t bytes_to_dst_align = distance_to_align_up<kAlign>(dst);
inline_memset_byte_per_byte(dst, value, bytes_to_dst_align);
size_t offset = bytes_to_dst_align;
for (; offset < count - kAlign; offset += kAlign)
store64_aligned<uint64_t>(generic::splat<uint64_t>(value), dst, offset);
inline_memset_byte_per_byte(dst, value, count, offset);
}

///////////////////////////////////////////////////////////////////////////////
// bcmp
///////////////////////////////////////////////////////////////////////////////

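// `p1` is aligned first, and the byte-per-byte head comparison doubles as an
// early exit: a nonzero return means a mismatch was already found. In the
// loop, `p1` words load directly while `p2` goes through the misalignment
// dispatch.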
[[maybe_unused]] LIBC_INLINE BcmpReturnType
inline_bcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_bcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_bcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint32_t a = load32_aligned<uint32_t>(p1, offset);
uint32_t b = load32_aligned(p2, offset, p2_alignment);
if (a != b)
return BcmpReturnType::NONZERO();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}

[[maybe_unused]] LIBC_INLINE BcmpReturnType
inline_bcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_bcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_bcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint64_t a = load64_aligned<uint64_t>(p1, offset);
uint64_t b = load64_aligned(p2, offset, p2_alignment);
if (a != b)
return BcmpReturnType::NONZERO();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}

///////////////////////////////////////////////////////////////////////////////
// memcmp
///////////////////////////////////////////////////////////////////////////////

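// Same shape as bcmp above, except a mismatch must produce an ordering.
// memcmp order is defined byte-lexicographically, so both words are converted
// to big-endian before the integer comparison in cmp_uint32_t /
// cmp_neq_uint64_t; this ranks the lowest-addressed differing byte first
// regardless of host endianness.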
[[maybe_unused]] LIBC_INLINE MemcmpReturnType
inline_memcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_memcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_memcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint32_t a = load32_aligned<uint32_t>(p1, offset);
uint32_t b = load32_aligned(p2, offset, p2_alignment);
if (a != b)
return cmp_uint32_t(Endian::to_big_endian(a), Endian::to_big_endian(b));
}
return inline_memcmp_byte_per_byte(p1, p2, count, offset);
}

[[maybe_unused]] LIBC_INLINE MemcmpReturnType
inline_memcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_memcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_memcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint64_t a = load64_aligned<uint64_t>(p1, offset);
uint64_t b = load64_aligned(p2, offset, p2_alignment);
if (a != b)
return cmp_neq_uint64_t(Endian::to_big_endian(a),
Endian::to_big_endian(b));
}
return inline_memcmp_byte_per_byte(p1, p2, count, offset);
}

} // namespace __llvm_libc

#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_GENERIC_ALIGNED_ACCESS_H
55 changes: 1 addition & 54 deletions libc/src/string/memory_utils/memcmp_implementations.h
@@ -12,6 +12,7 @@
#include "src/__support/common.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY LIBC_LOOP_NOUNROLL
#include "src/__support/macros/properties/architectures.h"
#include "src/string/memory_utils/generic/aligned_access.h"
#include "src/string/memory_utils/generic/byte_per_byte.h"
#include "src/string/memory_utils/op_generic.h"
#include "src/string/memory_utils/op_riscv.h"
@@ -27,60 +28,6 @@

namespace __llvm_libc {

[[maybe_unused]] LIBC_INLINE MemcmpReturnType
inline_memcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint64_t);
if (count <= 2 * kAlign)
return inline_memcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_memcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint64_t b;
if (p2_alignment == 0)
b = load64_aligned<uint64_t>(p2, offset);
else if (p2_alignment == 4)
b = load64_aligned<uint32_t, uint32_t>(p2, offset);
else if (p2_alignment == 2)
b = load64_aligned<uint16_t, uint16_t, uint16_t, uint16_t>(p2, offset);
else
b = load64_aligned<uint8_t, uint16_t, uint16_t, uint16_t, uint8_t>(
p2, offset);
uint64_t a = load64_aligned<uint64_t>(p1, offset);
if (a != b)
return cmp_neq_uint64_t(Endian::to_big_endian(a),
Endian::to_big_endian(b));
}
return inline_memcmp_byte_per_byte(p1, p2, count, offset);
}

[[maybe_unused]] LIBC_INLINE MemcmpReturnType
inline_memcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
constexpr size_t kAlign = sizeof(uint32_t);
if (count <= 2 * kAlign)
return inline_memcmp_byte_per_byte(p1, p2, count);
size_t bytes_to_p1_align = distance_to_align_up<kAlign>(p1);
if (auto value = inline_memcmp_byte_per_byte(p1, p2, bytes_to_p1_align))
return value;
size_t offset = bytes_to_p1_align;
size_t p2_alignment = distance_to_align_down<kAlign>(p2 + offset);
for (; offset < count - kAlign; offset += kAlign) {
uint32_t b;
if (p2_alignment == 0)
b = load32_aligned<uint32_t>(p2, offset);
else if (p2_alignment == 2)
b = load32_aligned<uint16_t, uint16_t>(p2, offset);
else
b = load32_aligned<uint8_t, uint16_t, uint8_t>(p2, offset);
uint32_t a = load32_aligned<uint32_t>(p1, offset);
if (a != b)
return cmp_uint32_t(Endian::to_big_endian(a), Endian::to_big_endian(b));
}
return inline_memcmp_byte_per_byte(p1, p2, count, offset);
}

LIBC_INLINE MemcmpReturnType inline_memcmp(CPtr p1, CPtr p2, size_t count) {
#if defined(LIBC_TARGET_ARCH_IS_X86)
return inline_memcmp_x86(p1, p2, count);
(diffs for the remaining 3 files not shown)
