
Porting musl/arch/x86_64/atomic_arch.h to unsafe Rust.

anp committed May 5, 2016
1 parent f507e4d commit 8cac70db2ad065e06e1320f6a232d9e3c6563490
Showing with 136 additions and 119 deletions.
  1. +1 −115 musl/arch/x86_64/atomic_arch.h
  2. +1 −1 src/lib.rs
  3. +122 −0 src/platform/linux-x86_64/atomic.rs
  4. +1 −0 src/platform/linux-x86_64/mod.rs
  5. +11 −3 src/thread/mod.rs
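
For orientation: each routine in the new Rust module keeps its original C symbol name via #[no_mangle], so musl's existing C callers link against the Rust definitions unchanged. As a rough illustration of the semantics being ported (not code from this commit), a_fetch_add behaves like a fetch_add on a 32-bit atomic; a minimal sketch using today's stable std atomics, assuming c_int is 32 bits:

use std::sync::atomic::{AtomicI32, Ordering};

// Illustrative only: what musl expects from a_fetch_add(p, v),
// expressed with std atomics instead of `lock xadd`.
fn fetch_add_like(p: &AtomicI32, v: i32) -> i32 {
    // Returns the value previously stored at p.
    p.fetch_add(v, Ordering::SeqCst)
}

fn main() {
    let x = AtomicI32::new(40);
    assert_eq!(fetch_add_like(&x, 2), 40);
    assert_eq!(x.load(Ordering::SeqCst), 42);
}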
@@ -1,116 +1,2 @@
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
__asm__ __volatile__ (
"lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
return t;
}

#define a_cas_p a_cas_p
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
__asm__( "lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*(void *volatile *)p)
: "a"(t), "r"(s) : "memory" );
return t;
}

#define a_swap a_swap
static inline int a_swap(volatile int *p, int v)
{
__asm__ __volatile__(
"xchg %0, %1"
: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
return v;
}

#define a_fetch_add a_fetch_add
static inline int a_fetch_add(volatile int *p, int v)
{
__asm__ __volatile__(
"lock ; xadd %0, %1"
: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
return v;
}

#define a_and a_and
static inline void a_and(volatile int *p, int v)
{
__asm__ __volatile__(
"lock ; and %1, %0"
: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or a_or
static inline void a_or(volatile int *p, int v)
{
__asm__ __volatile__(
"lock ; or %1, %0"
: "=m"(*p) : "r"(v) : "memory" );
}

#define a_and_64 a_and_64
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
__asm__ __volatile__(
"lock ; and %1, %0"
: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or_64 a_or_64
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
__asm__ __volatile__(
"lock ; or %1, %0"
: "=m"(*p) : "r"(v) : "memory" );
}

#define a_inc a_inc
static inline void a_inc(volatile int *p)
{
__asm__ __volatile__(
"lock ; incl %0"
: "=m"(*p) : "m"(*p) : "memory" );
}

#define a_dec a_dec
static inline void a_dec(volatile int *p)
{
__asm__ __volatile__(
"lock ; decl %0"
: "=m"(*p) : "m"(*p) : "memory" );
}

#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
__asm__ __volatile__(
"mov %1, %0 ; lock ; orl $0,(%%rsp)"
: "=m"(*p) : "r"(x) : "memory" );
}

#define a_barrier a_barrier
static inline void a_barrier()
{
__asm__ __volatile__( "" : : : "memory" );
}

#define a_spin a_spin
static inline void a_spin()
{
__asm__ __volatile__( "pause" : : : "memory" );
}

#define a_crash a_crash
static inline void a_crash()
{
__asm__ __volatile__( "hlt" : : : "memory" );
}

#define a_ctz_64 a_ctz_64
static inline int a_ctz_64(uint64_t x)
{
__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
return x;
}
int a_cas(volatile int *p, int t, int s);
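
With the definitions removed, the header keeps only a declaration of a_cas; the lock cmpxchg body now lives in the Rust module below and resolves at link time through the matching #[no_mangle] symbol. Semantically, a_cas(p, t, s) returns the value found at *p, which equals t exactly when the swap to s took effect. A rough stable-Rust equivalent for comparison (illustrative, assuming 32-bit c_int, not code from this commit):

use std::sync::atomic::{AtomicI32, Ordering};

// Same contract as musl's a_cas: return the previous value at p,
// whether or not the compare-and-swap succeeded.
fn cas_like(p: &AtomicI32, t: i32, s: i32) -> i32 {
    match p.compare_exchange(t, s, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(prev) => prev,
        Err(prev) => prev,
    }
}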
@@ -1,5 +1,5 @@
#![no_std]
#![feature(lang_items, linkage)]
#![feature(asm, lang_items, linkage)]

#![allow(non_camel_case_types)]

@@ -0,0 +1,122 @@
use c_types::*;

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_cas(p: *mut c_int, mut t: c_int, s: c_int) -> c_int {
asm!("lock ; cmpxchgl $3, $1" :
"=A"(t), "=*m"(p) :
"A"(t), "r"(s) :
"memory" :
"volatile");
t
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_cas_p(p: *mut c_int, mut t: *mut c_int, s: *mut c_int) -> *mut c_void {
asm!("lock ; cmpxchg $3, $1" :
"=A"(t), "=*m"(p) :
"A"(t), "r"(s) :
"memory" :
"volatile");
t as *mut c_void
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_swap(p: *mut c_int, mut v: c_int) -> c_int {
// Atomically exchange *p with v and return the old value, mirroring the C `xchg`.
asm!("xchg $0, $1"
: "=r"(v), "=*m"(p) : "0"(v) : "memory" : "volatile");
v
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_store(p: *mut c_int, x: c_int) {
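// The trailing `lock ; orl $0, (%rsp)` acts as a store barrier, as in the C original.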
asm!("mov $1, $0 ; lock ; orl $$0, (%rsp)"
: "=*m"(p) : "r"(x) : "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_inc(p: *mut c_int) {
asm!(
"lock ; incl $0"
:"=*m"(p)
:"m"(*p)
:"memory"
:"volatile"
);
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_dec(p: *mut c_int) {
asm!(
"lock ; decl $0"
:"=*m"(p)
:"m"(*p)
:"memory"
:"volatile"
);
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_fetch_add(p: *mut c_int, mut v: c_int) -> c_int {
asm!("lock ; xadd $0, $1"
: "=r"(v), "=*m"(p) : "0"(v) : "memory" : "volatile");
v
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_and(p: *mut c_int, v: c_int) {
asm!("lock ; and $1, $0"
: "=*m"(p) : "r"(v) : "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_or(p: *mut c_int, v: c_int) {
asm!("lock ; or $1, $0"
: "=*m"(p) : "r"(v) : "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_and_64(p: *mut u64, v: u64) {
asm!("lock ; and $1, $0"
: "=*m"(p) : "r"(v) : "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_or_64(p: *mut u64, v: u64) {
asm!("lock ; or $1, $0"
: "=*m"(p) : "r"(v) : "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_crash() {
asm!("hlt" ::: "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_spin() {
asm!("pause" ::: "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_barrier() {
asm!("" ::: "memory" : "volatile");
}

#[inline(always)]
#[no_mangle]
pub unsafe extern fn a_ctz_64(mut x: u64) -> u64 {
asm!("bsf $1, $0" : "=r"(x) : "r"(x));
x
}
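
A note on the last routine: a_ctz_64 counts trailing zero bits with `bsf`, whose result is undefined when the input is zero, so callers are expected to pass a nonzero value. A stable-Rust equivalent for comparison (illustrative, not part of the commit):

// Counts trailing zero bits, mirroring `bsf`; callers must pass x != 0.
fn ctz_64_like(x: u64) -> u64 {
    debug_assert!(x != 0);
    x.trailing_zeros() as u64
}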
@@ -1,3 +1,4 @@
pub use self::errno::*;

pub mod atomic;
pub mod errno;
@@ -15,10 +15,18 @@ pub const FUTEX_PRIVATE: c_int = 128;
pub const FUTEX_CLOCK_REALTIME: c_int = 256;

#[no_mangle]
pub unsafe extern fn __wake(address: *mut c_void, count: c_int, private: c_int) {
let private = if private != 0 { 128 } else { private };
pub unsafe extern "C" fn __wake(address: *mut c_void, count: c_int, private: c_int) {
let private = if private != 0 {
128
} else {
private
};

let count = if count < 0 { C_INT_MAX } else { count };
let count = if count < 0 {
C_INT_MAX
} else {
count
};

let res = syscall!(FUTEX, address, FUTEX_WAKE | private, count);
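
__wake normalizes any nonzero private flag to FUTEX_PRIVATE (128), clamps negative counts to C_INT_MAX, and issues the futex syscall. Roughly the same call expressed with the libc crate, for comparison (assumes the libc crate; illustrative, not code from this commit):

// Wake up to `count` waiters on the futex word at `address`.
unsafe fn futex_wake(address: *mut i32, count: i32, private: bool) -> i64 {
    let op = libc::FUTEX_WAKE | if private { libc::FUTEX_PRIVATE_FLAG } else { 0 };
    libc::syscall(libc::SYS_futex, address, op, count)
}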
