Add a baseline test for non-tag-preserving copy
This shows that we don't currently inline memcpy/memmove even for
intrinsics that have been tagged with the no_preserve_cheri_tags
attribute.
arichardson committed May 14, 2022
1 parent c49650e commit 253912d
Showing 4 changed files with 462 additions and 0 deletions.
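
For context, "inlining" here means the backend expanding the copy into plain loads and stores instead of emitting a call to the memcpy/memmove library routine. As a purely hypothetical sketch (the function name is illustrative, the pair layout is the i64 one used by the MIPS variant below, and the real change would happen during call lowering in the backend rather than in IR), the intended result is equivalent to:

; Hypothetical inlined form of a 16-byte, 8-byte-aligned pair copy that does
; not need to preserve capability tags (sketch only, not current output):
define void @memcpy_no_preserve_inlined(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
  %a0 = getelementptr inbounds %struct.pair, %struct.pair addrspace(200)* %a, i64 0, i32 0
  %b0 = getelementptr inbounds %struct.pair, %struct.pair addrspace(200)* %b, i64 0, i32 0
  %v0 = load i64, i64 addrspace(200)* %b0, align 8   ; copy the first i64 field
  store i64 %v0, i64 addrspace(200)* %a0, align 8
  %a1 = getelementptr inbounds %struct.pair, %struct.pair addrspace(200)* %a, i64 0, i32 1
  %b1 = getelementptr inbounds %struct.pair, %struct.pair addrspace(200)* %b, i64 0, i32 1
  %v1 = load i64, i64 addrspace(200)* %b1, align 8   ; copy the second i64 field
  store i64 %v1, i64 addrspace(200)* %a1, align 8
  ret void
}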
62 changes: 62 additions & 0 deletions llvm/test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll
@@ -0,0 +1,62 @@
; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows
; us to inline struct copies >= capability size.
; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -o - < %s | FileCheck %s

%struct.pair = type { iCAPRANGE, iCAPRANGE }

; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)
declare void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)

; Without a no_preserve_tags attribute we always call memcpy. In this case we
; don't know whether the type might actually contain capabilities (e.g. unions).
define void @memcpy_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false)
ret void
}

define void @memmove_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false)
ret void
}

; We have to emit a call if the intrinsic has must_preserve_cheri_tags:
define void @memcpy_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

define void @memmove_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

; We should be able to inline the memcpy/memmove call if the intrinsic has no_preserve_cheri_tags:
; TODO: we should be able to elide this memcpy call
define void @memcpy_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}

define void @memmove_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align @CAP_RANGE_BYTES@ %a_i8, i8 addrspace(200)* align @CAP_RANGE_BYTES@ %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}
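
The file above is the shared CHERI-Generic input template; the @...@ placeholders and the iCAPRANGE type are expanded per target when the checked-in tests are generated from it. A short summary of the substitutions, as they can be read off the RUN: lines and struct definitions of the two generated files below:

; Per-target expansion of the template (taken from the generated files below):
;   MIPS64/CHERI128 purecap:  iCAPRANGE -> i64, @CAP_RANGE_BYTES@ -> 8
;   RISC-V 32 purecap:        iCAPRANGE -> i32, @CAP_RANGE_BYTES@ -> 4
;   @PURECAP_HARDFLOAT_ARGS@ -> the target-specific llc flags in each RUN: line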
142 changes: 142 additions & 0 deletions llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-no-preserve-tags-attr.ll
@@ -0,0 +1,142 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update
; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll
; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows
; us to inline struct copies >= capability size.
; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -o - < %s | FileCheck %s

%struct.pair = type { i64, i64 }

; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)
declare void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)

; Without a no_preserve_tags attribute we always call memcpy. In this case we
; don't know whether the type might actually contain capabilities (e.g. unions).
define void @memcpy_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_no_attr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memcpy)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false)
ret void
}

define void @memmove_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_no_attr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memmove)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false)
ret void
}

; We have to emit a call if the intrinsic has must_preserve_cheri_tags:
define void @memcpy_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_must_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memcpy)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

define void @memmove_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_must_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memmove)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

; We should be able to inline the memcpy/memmove call if the intrinsic has no_preserve_cheri_tags:
; TODO: we should be able to elide this memcpy call
define void @memcpy_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_no_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memcpy)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}

define void @memmove_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_no_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset $c11, $c11, -16
; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill
; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8)
; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4)
; CHECK-NEXT: cgetpccincoffset $c1, $1
; CHECK-NEXT: clcbi $c12, %capcall20(memmove)($c1)
; CHECK-NEXT: cjalr $c12, $c17
; CHECK-NEXT: daddiu $4, $zero, 16
; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload
; CHECK-NEXT: cjr $c17
; CHECK-NEXT: cincoffset $c11, $c11, 16
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 8 %a_i8, i8 addrspace(200)* align 8 %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}
@@ -0,0 +1,132 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update
; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll
; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows
; us to inline struct copies >= capability size.
; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -o - < %s | FileCheck %s

%struct.pair = type { i32, i32 }

; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)
declare void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* nocapture writeonly, i8 addrspace(200)* nocapture readonly, i64, i1)

; Without a no_preserve_tags attribute we always call memcpy. In this case we
; don't know whether the type might actually contain capabilities (e.g. unions).
define void @memcpy_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_no_attr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp) # 8-byte Folded Spill
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: ccall memcpy
; CHECK-NEXT: clc cra, 8(csp) # 8-byte Folded Reload
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false)
ret void
}

define void @memmove_no_attr(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_no_attr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp) # 8-byte Folded Spill
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: ccall memmove
; CHECK-NEXT: clc cra, 8(csp) # 8-byte Folded Reload
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false)
ret void
}

; We have to emit a call if the intrinsic has must_preserve_cheri_tags:
define void @memcpy_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_must_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp) # 8-byte Folded Spill
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: ccall memcpy
; CHECK-NEXT: clc cra, 8(csp) # 8-byte Folded Reload
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

define void @memmove_must_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_must_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp) # 8-byte Folded Spill
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: ccall memmove
; CHECK-NEXT: clc cra, 8(csp) # 8-byte Folded Reload
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false) must_preserve_cheri_tags
ret void
}

; We should be able to inline the memcpy/memmove call if the intrinsic has no_preserve_cheri_tags:
; TODO: we should be able to elide this memcpy call
define void @memcpy_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memcpy_no_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp)
; CHECK-NEXT: .LBB4_1: # %entry
; CHECK-NEXT: # Label of block must be emitted
; CHECK-NEXT: auipcc ca4, %captab_pcrel_hi(memcpy)
; CHECK-NEXT: clc ca4, %pcrel_lo(.LBB4_1)(ca4)
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: cjalr ca4
; CHECK-NEXT: clc cra, 8(csp)
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}

define void @memmove_no_preserve(%struct.pair addrspace(200)* %a, %struct.pair addrspace(200)* %b) addrspace(200) nounwind {
; CHECK-LABEL: memmove_no_preserve:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cincoffset csp, csp, -16
; CHECK-NEXT: csc cra, 8(csp)
; CHECK-NEXT: .LBB5_1: # %entry
; CHECK-NEXT: # Label of block must be emitted
; CHECK-NEXT: auipcc ca4, %captab_pcrel_hi(memmove)
; CHECK-NEXT: clc ca4, %pcrel_lo(.LBB5_1)(ca4)
; CHECK-NEXT: addi a2, zero, 16
; CHECK-NEXT: mv a3, zero
; CHECK-NEXT: cjalr ca4
; CHECK-NEXT: clc cra, 8(csp)
; CHECK-NEXT: cincoffset csp, csp, 16
; CHECK-NEXT: cret
entry:
%a_i8 = bitcast %struct.pair addrspace(200)* %a to i8 addrspace(200)*
%b_i8 = bitcast %struct.pair addrspace(200)* %b to i8 addrspace(200)*
call void @llvm.memmove.p200i8.p200i8.i64(i8 addrspace(200)* align 4 %a_i8, i8 addrspace(200)* align 4 %b_i8, i64 16, i1 false) no_preserve_cheri_tags
ret void
}
