diff --git a/libcxx/docs/DesignDocs/AtomicDesign.rst b/libcxx/docs/DesignDocs/AtomicDesign.rst
new file mode 100644
index 0000000000000..4b28ab2a8218a
--- /dev/null
+++ b/libcxx/docs/DesignDocs/AtomicDesign.rst
@@ -0,0 +1,797 @@
+
+====================
+``<atomic>`` Design
+====================
+
+There were originally 3 designs under consideration. They differ in where most
+of the implementation work is done. The functionality exposed to the customer
+should be identical (and conforming) for all three designs.
+
+
+Design A: Minimal work for the library
+======================================
+The compiler supplies all of the intrinsics as described below. This list of
+intrinsics roughly parallels the requirements of the C and C++ atomics proposals.
+The C and C++ library implementations simply drop through to these intrinsics.
+For anything the platform does not support in hardware, the compiler arranges
+for a (compiler-rt) library call to be made which does the job with a mutex,
+ignoring the memory ordering parameter (effectively implementing
+``memory_order_seq_cst``).
+
+Ultimate efficiency is preferred over run-time error checking. Undefined
+behavior is acceptable when the inputs do not conform as defined below.
+
+.. code-block:: cpp
+
+  // In every intrinsic signature below, type* atomic_obj may be a pointer to a
+  // volatile-qualified type.
+  // Memory ordering values map to the following meanings:
+  //   memory_order_relaxed == 0
+  //   memory_order_consume == 1
+  //   memory_order_acquire == 2
+  //   memory_order_release == 3
+  //   memory_order_acq_rel == 4
+  //   memory_order_seq_cst == 5
+
+  // type must be trivially copyable
+  // type represents a "type argument"
+  bool __atomic_is_lock_free(type);
+
+  // type must be trivially copyable
+  // Behavior is defined for mem_ord = 0, 1, 2, 5
+  type __atomic_load(const type* atomic_obj, int mem_ord);
+
+  // type must be trivially copyable
+  // Behavior is defined for mem_ord = 0, 3, 5
+  void __atomic_store(type* atomic_obj, type desired, int mem_ord);
+
+  // type must be trivially copyable
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_exchange(type* atomic_obj, type desired, int mem_ord);
+
+  // type must be trivially copyable
+  // Behavior is defined for mem_success = [0 ... 5],
+  //   mem_failure <= mem_success
+  //   mem_failure != 3
+  //   mem_failure != 4
+  bool __atomic_compare_exchange_strong(type* atomic_obj,
+                                        type* expected, type desired,
+                                        int mem_success, int mem_failure);
+
+  // type must be trivially copyable
+  // Behavior is defined for mem_success = [0 ... 5],
+  //   mem_failure <= mem_success
+  //   mem_failure != 3
+  //   mem_failure != 4
+  bool __atomic_compare_exchange_weak(type* atomic_obj,
+                                      type* expected, type desired,
+                                      int mem_success, int mem_failure);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_fetch_add(type* atomic_obj, type operand, int mem_ord);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_fetch_sub(type* atomic_obj, type operand, int mem_ord);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_fetch_and(type* atomic_obj, type operand, int mem_ord);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_fetch_or(type* atomic_obj, type operand, int mem_ord);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  // Behavior is defined for mem_ord = [0 ... 5]
+  type __atomic_fetch_xor(type* atomic_obj, type operand, int mem_ord);
+
+  // Behavior is defined for mem_ord = [0 ... 5]
+  void* __atomic_fetch_add(void** atomic_obj, ptrdiff_t operand, int mem_ord);
+  void* __atomic_fetch_sub(void** atomic_obj, ptrdiff_t operand, int mem_ord);
+
+  // Behavior is defined for mem_ord = [0 ... 5]
+  void __atomic_thread_fence(int mem_ord);
+  void __atomic_signal_fence(int mem_ord);
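+
+For illustration only, here is a minimal sketch of how a library-level
+operation might drop through to one of these intrinsics. The free function
+shown here is hypothetical; it simply assumes the integer mapping listed
+above for the ``memory_order`` enumerators.
+
+.. code-block:: cpp
+
+  // Hypothetical library-side wrapper, not the actual libc++ implementation.
+  // The cast assumes memory_order_relaxed through memory_order_seq_cst have
+  // the values 0 through 5 listed above.
+  template <class T>
+  T atomic_fetch_add_example(T* atomic_obj, T operand,
+                             memory_order ord = memory_order_seq_cst) {
+    return __atomic_fetch_add(atomic_obj, operand, static_cast<int>(ord));
+  }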
+
+If desired, the intrinsics taking a single ``mem_ord`` parameter can default
+this argument to 5 (``memory_order_seq_cst``).
+
+If desired, the intrinsics taking two ordering parameters can default
+``mem_success`` to 5, and ``mem_failure`` to
+``translate_memory_order(mem_success)``, where ``translate_memory_order`` is
+defined as:
+
+.. code-block:: cpp
+
+  int translate_memory_order(int o) {
+    switch (o) {
+    case 4: // acq_rel lowers to acquire on failure
+      return 2;
+    case 3: // release lowers to relaxed on failure
+      return 0;
+    }
+    return o;
+  }
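+
+Expressed as code, that defaulting rule might look like the following
+(hypothetical helper, shown only to make the rule concrete):
+
+.. code-block:: cpp
+
+  // Hypothetical: forwards to the two-ordering intrinsic, deriving the
+  // failure ordering from the success ordering as described above.
+  template <class T>
+  bool cas_strong_defaulted(T* atomic_obj, T* expected, T desired,
+                            int mem_success = 5) {
+    return __atomic_compare_exchange_strong(atomic_obj, expected, desired,
+                                            mem_success,
+                                            translate_memory_order(mem_success));
+  }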
+
+Below are representative C++ implementations of all of the operations. Their
+purpose is to document the desired semantics of each operation, assuming
+``memory_order_seq_cst``. This is essentially the code that will be called
+if the front end calls out to compiler-rt.
+
+.. code-block:: cpp
+
+  template <class T>
+  T __atomic_load(T const volatile* obj) {
+    unique_lock<mutex> _(some_mutex);
+    return *obj;
+  }
+
+  template <class T>
+  void __atomic_store(T volatile* obj, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+  }
+
+  template <class T>
+  T __atomic_exchange(T volatile* obj, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj = desr;
+    return r;
+  }
+
+  template <class T>
+  bool __atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
+    {
+      std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
+      return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
+    return false;
+  }
+
+  // May spuriously return false (even if *obj == *exp); this mutex-based
+  // reference implementation never exercises that latitude.
+  template <class T>
+  bool __atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
+    {
+      std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
+      return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
+    return false;
+  }
+
+  template <class T>
+  T __atomic_fetch_add(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj += operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_sub(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj -= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_and(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj &= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_or(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj |= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_xor(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj ^= operand;
+    return r;
+  }
+
+  void* __atomic_fetch_add(void* volatile* obj, ptrdiff_t operand) {
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) += operand; // pointer arithmetic is done in bytes
+    return r;
+  }
+
+  void* __atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand) {
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) -= operand; // pointer arithmetic is done in bytes
+    return r;
+  }
+
+  void __atomic_thread_fence() {
+    unique_lock<mutex> _(some_mutex);
+  }
+
+  void __atomic_signal_fence() {
+    unique_lock<mutex> _(some_mutex);
+  }
+
+
+Design B: Something in between
+==============================
+This is a variation of Design A which puts the burden on the library to arrange
+for the correct manipulation of the run-time memory ordering arguments, and only
+calls the compiler for well-defined memory orderings. I think of this design as
+the worst of A and C, instead of the best of A and C, but I offer it as an
+option in the spirit of completeness.
+
+.. code-block:: cpp
+
+  // type must be trivially copyable
+  bool __atomic_is_lock_free(const type* atomic_obj);
+
+  // type must be trivially copyable
+  type __atomic_load_relaxed(const volatile type* atomic_obj);
+  type __atomic_load_consume(const volatile type* atomic_obj);
+  type __atomic_load_acquire(const volatile type* atomic_obj);
+  type __atomic_load_seq_cst(const volatile type* atomic_obj);
+
+  // type must be trivially copyable
+  void __atomic_store_relaxed(volatile type* atomic_obj, type desired);
+  void __atomic_store_release(volatile type* atomic_obj, type desired);
+  void __atomic_store_seq_cst(volatile type* atomic_obj, type desired);
+
+  // type must be trivially copyable
+  type __atomic_exchange_relaxed(volatile type* atomic_obj, type desired);
+  type __atomic_exchange_consume(volatile type* atomic_obj, type desired);
+  type __atomic_exchange_acquire(volatile type* atomic_obj, type desired);
+  type __atomic_exchange_release(volatile type* atomic_obj, type desired);
+  type __atomic_exchange_acq_rel(volatile type* atomic_obj, type desired);
+  type __atomic_exchange_seq_cst(volatile type* atomic_obj, type desired);
+
+  // type must be trivially copyable
+  bool __atomic_compare_exchange_strong_relaxed_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_consume_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_consume_consume(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acquire_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acquire_consume(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acquire_acquire(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_release_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_release_consume(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_release_acquire(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acq_rel_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acq_rel_consume(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_acq_rel_acquire(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_seq_cst_relaxed(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_seq_cst_consume(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_seq_cst_acquire(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+  bool __atomic_compare_exchange_strong_seq_cst_seq_cst(volatile type* atomic_obj,
+                                                        type* expected,
+                                                        type desired);
+
+  // type must be trivially copyable
+  bool __atomic_compare_exchange_weak_relaxed_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_consume_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_consume_consume(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acquire_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acquire_consume(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acquire_acquire(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_release_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_release_consume(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_release_acquire(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acq_rel_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acq_rel_consume(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_acq_rel_acquire(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_seq_cst_relaxed(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_seq_cst_consume(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_seq_cst_acquire(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+  bool __atomic_compare_exchange_weak_seq_cst_seq_cst(volatile type* atomic_obj,
+                                                      type* expected,
+                                                      type desired);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  type __atomic_fetch_add_relaxed(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_add_consume(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_add_acquire(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_add_release(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_add_acq_rel(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_add_seq_cst(volatile type* atomic_obj, type operand);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  type __atomic_fetch_sub_relaxed(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_sub_consume(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_sub_acquire(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_sub_release(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_sub_acq_rel(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_sub_seq_cst(volatile type* atomic_obj, type operand);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  type __atomic_fetch_and_relaxed(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_and_consume(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_and_acquire(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_and_release(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_and_acq_rel(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_and_seq_cst(volatile type* atomic_obj, type operand);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  type __atomic_fetch_or_relaxed(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_or_consume(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_or_acquire(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_or_release(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_or_acq_rel(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_or_seq_cst(volatile type* atomic_obj, type operand);
+
+  // type is one of: char, signed char, unsigned char, short, unsigned short, int,
+  //      unsigned int, long, unsigned long, long long, unsigned long long,
+  //      char16_t, char32_t, wchar_t
+  type __atomic_fetch_xor_relaxed(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_xor_consume(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_xor_acquire(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_xor_release(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_xor_acq_rel(volatile type* atomic_obj, type operand);
+  type __atomic_fetch_xor_seq_cst(volatile type* atomic_obj, type operand);
+
+  void* __atomic_fetch_add_relaxed(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_add_consume(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_add_acquire(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_add_release(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_add_acq_rel(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_add_seq_cst(void* volatile* atomic_obj, ptrdiff_t operand);
+
+  void* __atomic_fetch_sub_relaxed(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_sub_consume(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_sub_acquire(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_sub_release(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_sub_acq_rel(void* volatile* atomic_obj, ptrdiff_t operand);
+  void* __atomic_fetch_sub_seq_cst(void* volatile* atomic_obj, ptrdiff_t operand);
+
+  void __atomic_thread_fence_relaxed();
+  void __atomic_thread_fence_consume();
+  void __atomic_thread_fence_acquire();
+  void __atomic_thread_fence_release();
+  void __atomic_thread_fence_acq_rel();
+  void __atomic_thread_fence_seq_cst();
+
+  void __atomic_signal_fence_relaxed();
+  void __atomic_signal_fence_consume();
+  void __atomic_signal_fence_acquire();
+  void __atomic_signal_fence_release();
+  void __atomic_signal_fence_acq_rel();
+  void __atomic_signal_fence_seq_cst();
+
+Design C: Minimal work for the front end
+========================================
+The ``<atomic>`` header is one of the headers most closely coupled to the
+compiler. Ideally, invoking any function from ``<atomic>`` should result in
+highly optimized assembly being inserted directly into your application --
+assembly that is not otherwise representable by higher-level C or C++
+expressions. The design of the libc++ ``<atomic>`` header started with this
+goal in mind. A secondary, but still very important, goal is that the compiler
+should have to do minimal work to facilitate the implementation of
+``<atomic>``. Without this second goal, practically speaking, the libc++
+``<atomic>`` header would be doomed to be a barely supported, second-class
+citizen on almost every platform.
+
+Goals:
+
+- Optimal code generation for atomic operations
+- Minimal effort for the compiler to achieve goal 1 on any given platform
+- Conformance to the C++0x draft standard
+
+The purpose of this document is to inform compiler writers what they need to do
+to enable a high-performance libc++ ``<atomic>`` with minimal effort.
+
+The minimal work that must be done for a conforming ``<atomic>``
+----------------------------------------------------------------
+The only "atomic" operations that must actually be lock-free in
+``<atomic>`` are represented by the following compiler intrinsics:
+
+.. code-block:: cpp
+
+  // Reference semantics only; the intrinsics the compiler supplies for these
+  // signatures must actually be lock-free.
+  __atomic_flag__ __atomic_exchange_seq_cst(__atomic_flag__ volatile* obj, __atomic_flag__ desr) {
+    unique_lock<mutex> _(some_mutex);
+    __atomic_flag__ result = *obj;
+    *obj = desr;
+    return result;
+  }
+
+  void __atomic_store_seq_cst(__atomic_flag__ volatile* obj, __atomic_flag__ desr) {
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+  }
+
+Where:
+
+- If ``__has_feature(__atomic_flag)`` evaluates to 1 in the preprocessor then
+  the compiler must define ``__atomic_flag__`` (e.g. as a typedef to ``int``).
+- If ``__has_feature(__atomic_flag)`` evaluates to 0 in the preprocessor then
+  the library defines ``__atomic_flag__`` as a typedef to ``bool``.
+- To communicate that the above intrinsics are available, the compiler must
+  arrange for ``__has_feature`` to return 1 when fed the intrinsic name
+  appended with an '_' and the mangled type name of ``__atomic_flag__``.
+
+For example, if ``__atomic_flag__`` is ``unsigned int``:
+
+.. code-block:: cpp
+
+  // __has_feature(__atomic_flag) == 1
+  // __has_feature(__atomic_exchange_seq_cst_j) == 1
+  // __has_feature(__atomic_store_seq_cst_j) == 1
+
+  typedef unsigned int __atomic_flag__;
+
+  unsigned int __atomic_exchange_seq_cst(unsigned int volatile*, unsigned int) {
+    // ...
+  }
+
+  void __atomic_store_seq_cst(unsigned int volatile*, unsigned int) {
+    // ...
+  }
+
+That's it! Compiler writers do the above and you've got a fully conforming
+(though sub-par performance) ``<atomic>`` header!
+
+
+Recommended work for a higher-performance ``<atomic>``
+------------------------------------------------------
+It would be good if the above intrinsics worked with all integral types plus
+``void*``. Because this may not be possible to do in a lock-free manner for
+all integral types on all platforms, a compiler must communicate each type that
+an intrinsic works with. For example, if ``__atomic_exchange_seq_cst`` works
+for all types except ``long long`` and ``unsigned long long``, then:
+
+.. code-block:: cpp
+
+  __has_feature(__atomic_exchange_seq_cst_b) == 1  // bool
+  __has_feature(__atomic_exchange_seq_cst_c) == 1  // char
+  __has_feature(__atomic_exchange_seq_cst_a) == 1  // signed char
+  __has_feature(__atomic_exchange_seq_cst_h) == 1  // unsigned char
+  __has_feature(__atomic_exchange_seq_cst_Ds) == 1 // char16_t
+  __has_feature(__atomic_exchange_seq_cst_Di) == 1 // char32_t
+  __has_feature(__atomic_exchange_seq_cst_w) == 1  // wchar_t
+  __has_feature(__atomic_exchange_seq_cst_s) == 1  // short
+  __has_feature(__atomic_exchange_seq_cst_t) == 1  // unsigned short
+  __has_feature(__atomic_exchange_seq_cst_i) == 1  // int
+  __has_feature(__atomic_exchange_seq_cst_j) == 1  // unsigned int
+  __has_feature(__atomic_exchange_seq_cst_l) == 1  // long
+  __has_feature(__atomic_exchange_seq_cst_m) == 1  // unsigned long
+  __has_feature(__atomic_exchange_seq_cst_Pv) == 1 // void*
+
+Note that only the ``__has_feature`` flag is decorated with the argument
+type. The name of the compiler intrinsic itself is not decorated; it works
+like a C++ overloaded function.
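+
+As a sketch of how the library side might consume these feature tests
+(hypothetical helper name, assuming the ``unsigned int`` mangling ``j`` shown
+above):
+
+.. code-block:: cpp
+
+  // Hypothetical library-internal dispatch: use the compiler's lock-free
+  // intrinsic when it is advertised, and otherwise fall back to the
+  // mutex-based reference implementation.
+  unsigned int exchange_seq_cst(unsigned int volatile* obj, unsigned int desr) {
+  #if __has_feature(__atomic_exchange_seq_cst_j)
+    return __atomic_exchange_seq_cst(obj, desr);
+  #else
+    unique_lock<mutex> _(some_mutex);
+    unsigned int r = *obj;
+    *obj = desr;
+    return r;
+  #endif
+  }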
+
+Additionally, there are other intrinsics besides ``__atomic_exchange_seq_cst``
+and ``__atomic_store_seq_cst``. They are optional, but if the compiler can
+generate faster code than the library provides, clients will benefit from the
+compiler writer's expertise and knowledge of the targeted platform.
+
+Below is the complete list of *sequentially consistent* intrinsics, and
+their library implementations. Template syntax is used to indicate the desired
+overloading for integral and ``void*`` types. The template does not represent a
+requirement that the intrinsic operate on **any** type!
+
+.. code-block:: cpp
+
+  // T is one of:
+  //   bool, char, signed char, unsigned char, short, unsigned short,
+  //   int, unsigned int, long, unsigned long,
+  //   long long, unsigned long long, char16_t, char32_t, wchar_t, void*
+
+  template <class T>
+  T __atomic_load_seq_cst(T const volatile* obj) {
+    unique_lock<mutex> _(some_mutex);
+    return *obj;
+  }
+
+  template <class T>
+  void __atomic_store_seq_cst(T volatile* obj, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+  }
+
+  template <class T>
+  T __atomic_exchange_seq_cst(T volatile* obj, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj = desr;
+    return r;
+  }
+
+  template <class T>
+  bool __atomic_compare_exchange_strong_seq_cst_seq_cst(T volatile* obj, T* exp, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) {
+      std::memcpy(const_cast<T*>(obj), &desr, sizeof(T));
+      return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T));
+    return false;
+  }
+
+  template <class T>
+  bool __atomic_compare_exchange_weak_seq_cst_seq_cst(T volatile* obj, T* exp, T desr) {
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) {
+      std::memcpy(const_cast<T*>(obj), &desr, sizeof(T));
+      return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T));
+    return false;
+  }
+
+  // T is one of:
+  //   char, signed char, unsigned char, short, unsigned short,
+  //   int, unsigned int, long, unsigned long,
+  //   long long, unsigned long long, char16_t, char32_t, wchar_t
+
+  template <class T>
+  T __atomic_fetch_add_seq_cst(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj += operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_sub_seq_cst(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj -= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_and_seq_cst(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj &= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_or_seq_cst(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj |= operand;
+    return r;
+  }
+
+  template <class T>
+  T __atomic_fetch_xor_seq_cst(T volatile* obj, T operand) {
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj ^= operand;
+    return r;
+  }
+
+  void* __atomic_fetch_add_seq_cst(void* volatile* obj, ptrdiff_t operand) {
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) += operand; // pointer arithmetic is done in bytes
+    return r;
+  }
+
+  void* __atomic_fetch_sub_seq_cst(void* volatile* obj, ptrdiff_t operand) {
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) -= operand; // pointer arithmetic is done in bytes
+    return r;
+  }
+
+  void __atomic_thread_fence_seq_cst() {
+    unique_lock<mutex> _(some_mutex);
+  }
+
+  void __atomic_signal_fence_seq_cst() {
+    unique_lock<mutex> _(some_mutex);
+  }
+
+One should consult the (currently draft) C++ Standard for the details of the
+definitions of these operations. For example,
+``__atomic_compare_exchange_weak_seq_cst_seq_cst`` is allowed to fail
+spuriously while ``__atomic_compare_exchange_strong_seq_cst_seq_cst`` is not.
+
+If on your platform the lock-free definition of ``__atomic_compare_exchange_weak_seq_cst_seq_cst``
+would be the same as ``__atomic_compare_exchange_strong_seq_cst_seq_cst``, you may omit the
+``__atomic_compare_exchange_weak_seq_cst_seq_cst`` intrinsic without a performance cost. The
+library will prefer your implementation of ``__atomic_compare_exchange_strong_seq_cst_seq_cst``
+over its own definition for implementing ``__atomic_compare_exchange_weak_seq_cst_seq_cst``.
+That is, the library will arrange for ``__atomic_compare_exchange_weak_seq_cst_seq_cst`` to call
+``__atomic_compare_exchange_strong_seq_cst_seq_cst`` if you supply an intrinsic for the strong
+version but not the weak.
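+
+A sketch of that fallback, assuming the feature-test naming scheme above and
+``unsigned int`` as the example type (the helper name is hypothetical):
+
+.. code-block:: cpp
+
+  // Hypothetical library-internal helper: prefer a weak intrinsic, then a
+  // strong one (a strong compare-exchange is always a correct implementation
+  // of the weak operation), then the mutex-based fallback.
+  bool cas_weak_seq_cst(unsigned int volatile* obj,
+                        unsigned int* exp, unsigned int desr) {
+  #if __has_feature(__atomic_compare_exchange_weak_seq_cst_seq_cst_j)
+    return __atomic_compare_exchange_weak_seq_cst_seq_cst(obj, exp, desr);
+  #elif __has_feature(__atomic_compare_exchange_strong_seq_cst_seq_cst_j)
+    return __atomic_compare_exchange_strong_seq_cst_seq_cst(obj, exp, desr);
+  #else
+    unique_lock<mutex> _(some_mutex);
+    if (*obj == *exp) { *obj = desr; return true; }
+    *exp = *obj;
+    return false;
+  #endif
+  }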
+
+Taking advantage of weaker memory synchronization
+-------------------------------------------------
+So far, all of the intrinsics presented require a **sequentially consistent** memory ordering.
+That is, no loads or stores can move across the operation (just as if the library had locked
+that internal mutex). But ``<atomic>`` supports weaker memory ordering operations. In all,
+there are six memory orderings (listed here from strongest to weakest):
+
+.. code-block:: cpp
+
+  memory_order_seq_cst
+  memory_order_acq_rel
+  memory_order_release
+  memory_order_acquire
+  memory_order_consume
+  memory_order_relaxed
+
+(See the C++ Standard for the detailed definitions of each of these orderings.)
+
+On some platforms, the compiler vendor can offer some or even all of the above
+intrinsics at one or more weaker levels of memory synchronization. This might
+allow, for example, eliding an ``mfence`` instruction on x86.
+
+If the compiler does not offer a given operation at a given memory ordering
+level, the library will automatically call the next stronger memory ordering
+operation that is available. This continues up to ``seq_cst``, and if that
+doesn't exist, the library takes over and does the job with a ``mutex``. This
+is a compile-time search and selection operation. At run time, the application
+will see only the few inlined assembly instructions for the selected intrinsic.
+
+Each intrinsic name is suffixed with the 7-letter name of the memory ordering
+it addresses. For example, a ``load`` with ``relaxed`` ordering is defined by:
+
+.. code-block:: cpp
+
+  T __atomic_load_relaxed(const volatile T* obj);
+
+And announced with:
+
+.. code-block:: cpp
+
+  __has_feature(__atomic_load_relaxed_b) == 1 // bool
+  __has_feature(__atomic_load_relaxed_c) == 1 // char
+  __has_feature(__atomic_load_relaxed_a) == 1 // signed char
+  ...
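+
+Putting the pieces together, the compile-time selection described above might
+look like this for a relaxed load of ``int`` (hypothetical helper name; the
+real machinery generalizes over operation, type, and ordering):
+
+.. code-block:: cpp
+
+  // Hypothetical: the customer asked for memory_order_relaxed. At compile
+  // time, pick the weakest advertised intrinsic that is at least as strong,
+  // and fall back to the mutex if the compiler supplied none of them.
+  int load_relaxed(int const volatile* obj) {
+  #if __has_feature(__atomic_load_relaxed_i)
+    return __atomic_load_relaxed(obj);
+  #elif __has_feature(__atomic_load_consume_i)
+    return __atomic_load_consume(obj);
+  #elif __has_feature(__atomic_load_acquire_i)
+    return __atomic_load_acquire(obj);
+  #elif __has_feature(__atomic_load_seq_cst_i)
+    return __atomic_load_seq_cst(obj);
+  #else
+    unique_lock<mutex> _(some_mutex); // library mutex fallback
+    return *obj;
+  #endif
+  }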
+
+The ``__atomic_compare_exchange_strong(weak)`` intrinsics are parameterized
+on two memory orderings. The first ordering applies when the operation returns
+``true`` and the second ordering applies when the operation returns ``false``.
+
+Not every memory ordering is appropriate for every operation. ``exchange``
+and the ``fetch_XXX`` operations support all 6. But ``load`` only supports
+``relaxed``, ``consume``, ``acquire``, and ``seq_cst``. ``store`` only supports
+``relaxed``, ``release``, and ``seq_cst``. The ``compare_exchange`` operations
+support the following 16 combinations out of the possible 36:
+
+.. code-block:: cpp
+
+  relaxed_relaxed
+  consume_relaxed
+  consume_consume
+  acquire_relaxed
+  acquire_consume
+  acquire_acquire
+  release_relaxed
+  release_consume
+  release_acquire
+  acq_rel_relaxed
+  acq_rel_consume
+  acq_rel_acquire
+  seq_cst_relaxed
+  seq_cst_consume
+  seq_cst_acquire
+  seq_cst_seq_cst
+
+Again, the compiler supplies intrinsics only for the strongest orderings where
+it can make a difference. The library takes care of calling the weakest
+supplied intrinsic that is as strong as, or stronger than, what the customer
+asked for.
+
+Note about ABI
+==============
+With any design, the (back end) compiler writer should note that the decision to
+implement lock-free operations on any given type (or not) is an ABI-binding decision.
+One cannot change from treating a type as not lock-free to lock-free (or vice versa)
+without breaking your ABI.
+
+For example:
+
+**TU1.cpp**:
+
+.. code-block:: cpp
+
+  extern atomic<long long> A;
+  int foo() { return A.compare_exchange_strong(w, x); }
+
+
+**TU2.cpp**:
+
+.. code-block:: cpp
+
+  extern atomic<long long> A;
+  void bar() { A.compare_exchange_strong(y, z); }
+
+If only **one** of these calls to ``compare_exchange_strong`` is implemented with
+mutex-locked code, then that mutex-locked code will not be executed mutually
+exclusively of the one implemented in a lock-free manner.
diff --git a/libcxx/docs/index.rst b/libcxx/docs/index.rst
index 3b8417eb2c717..c93fbc7e5d654 100644
--- a/libcxx/docs/index.rst
+++ b/libcxx/docs/index.rst
@@ -165,17 +165,18 @@ Design Documents
 .. toctree::
     :maxdepth: 1
 
-    DesignDocs/DebugMode
-    DesignDocs/CapturingConfigInfo
     DesignDocs/ABIVersioning
+    DesignDocs/AtomicDesign
+    DesignDocs/CapturingConfigInfo
+    DesignDocs/DebugMode
     DesignDocs/ExperimentalFeatures
-    DesignDocs/VisibilityMacros
-    DesignDocs/ThreadingSupportAPI
-    DesignDocs/FileTimeType
-    DesignDocs/FeatureTestMacros
     DesignDocs/ExtendedCXX03Support
-    DesignDocs/UniquePtrTrivialAbi
+    DesignDocs/FeatureTestMacros
+    DesignDocs/FileTimeType
     DesignDocs/NoexceptPolicy
+    DesignDocs/ThreadingSupportAPI
+    DesignDocs/UniquePtrTrivialAbi
+    DesignDocs/VisibilityMacros
 
 * ` design `_
 * ` design `_
diff --git a/libcxx/www/atomic_design.html b/libcxx/www/atomic_design.html
deleted file mode 100644
index c613b9441a836..0000000000000
--- a/libcxx/www/atomic_design.html
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
-
-
-
-  <atomic> design
-
-
-
-
-
-
-
-
- -

<atomic> design

- - -

-There are currently 3 designs under consideration. They differ in where most -of the implementation work is done. The functionality exposed to the customer -should be identical (and conforming) for all three designs. -

- -
    -
  1. -Minimal work for the library -
  2. -
  3. -Something in between -
  4. -
  5. -Minimal work for the front end -
  6. -
- -

-With any design, the (back end) compiler writer should note: -

- -
-

-The decision to implement lock-free operations on any given type (or not) is an -ABI-binding decision. One can not change from treating a type as not lock free, -to lock free (or vice-versa) without breaking your ABI. -

- -

-Example: -

- -
-TU1.cc
------------
-extern atomic<long long> A;
-int foo() { return A.compare_exchange_strong(w, x); }
-
-TU2.cc
------------
-extern atomic<long long> A;
-void bar() { return A.compare_exchange_strong(y, z); }
-
-
- -

-If only one of these calls to compare_exchange_strong is -implemented with mutex-locked code, then that mutex-locked code will not be -executed mutually exclusively of the one implemented in a lock-free manner. -

- -
- - diff --git a/libcxx/www/atomic_design_a.html b/libcxx/www/atomic_design_a.html deleted file mode 100644 index 44a2d8f896834..0000000000000 --- a/libcxx/www/atomic_design_a.html +++ /dev/null @@ -1,308 +0,0 @@ - - - - - - <atomic> design - - - - - - - -
- -

<atomic> design

- - -

-The compiler supplies all of the intrinsics as described below. This list of -intrinsics roughly parallels the requirements of the C and C++ atomics -proposals. The C and C++ library implementations simply drop through to these -intrinsics. Anything the platform does not support in hardware, the compiler -arranges for a (compiler-rt) library call to be made which will do the job with -a mutex, and in this case ignoring the memory ordering parameter (effectively -implementing memory_order_seq_cst). -

- -

-Ultimate efficiency is preferred over run time error checking. Undefined -behavior is acceptable when the inputs do not conform as defined below. -

- -
-// In every intrinsic signature below, type* atomic_obj may be a pointer to a
-//    volatile-qualified type.
-// Memory ordering values map to the following meanings:
-//   memory_order_relaxed == 0
-//   memory_order_consume == 1
-//   memory_order_acquire == 2
-//   memory_order_release == 3
-//   memory_order_acq_rel == 4
-//   memory_order_seq_cst == 5
-
-// type must be trivially copyable
-// type represents a "type argument"
-bool __atomic_is_lock_free(type);
-
-// type must be trivially copyable
-// Behavior is defined for mem_ord = 0, 1, 2, 5
-type __atomic_load(const type* atomic_obj, int mem_ord);
-
-// type must be trivially copyable
-// Behavior is defined for mem_ord = 0, 3, 5
-void __atomic_store(type* atomic_obj, type desired, int mem_ord);
-
-// type must be trivially copyable
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_exchange(type* atomic_obj, type desired, int mem_ord);
-
-// type must be trivially copyable
-// Behavior is defined for mem_success = [0 ... 5],
-//   mem_failure <= mem_success
-//   mem_failure != 3
-//   mem_failure != 4
-bool __atomic_compare_exchange_strong(type* atomic_obj,
-                                      type* expected, type desired,
-                                      int mem_success, int mem_failure);
-
-// type must be trivially copyable
-// Behavior is defined for mem_success = [0 ... 5],
-//   mem_failure <= mem_success
-//   mem_failure != 3
-//   mem_failure != 4
-bool __atomic_compare_exchange_weak(type* atomic_obj,
-                                    type* expected, type desired,
-                                    int mem_success, int mem_failure);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_fetch_add(type* atomic_obj, type operand, int mem_ord);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_fetch_sub(type* atomic_obj, type operand, int mem_ord);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_fetch_and(type* atomic_obj, type operand, int mem_ord);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_fetch_or(type* atomic_obj, type operand, int mem_ord);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-// Behavior is defined for mem_ord = [0 ... 5]
-type __atomic_fetch_xor(type* atomic_obj, type operand, int mem_ord);
-
-// Behavior is defined for mem_ord = [0 ... 5]
-void* __atomic_fetch_add(void** atomic_obj, ptrdiff_t operand, int mem_ord);
-void* __atomic_fetch_sub(void** atomic_obj, ptrdiff_t operand, int mem_ord);
-
-// Behavior is defined for mem_ord = [0 ... 5]
-void __atomic_thread_fence(int mem_ord);
-void __atomic_signal_fence(int mem_ord);
-
- -

-If desired the intrinsics taking a single mem_ord parameter can default -this argument to 5. -

- -

-If desired the intrinsics taking two ordering parameters can default -mem_success to 5, and mem_failure to -translate_memory_order(mem_success) where -translate_memory_order(mem_success) is defined as: -

- -
-int
-translate_memory_order(int o)
-{
-    switch (o)
-    {
-    case 4:
-        return 2;
-    case 3:
-        return 0;
-    }
-    return o;
-}
-
- -

-Below are representative C++ implementations of all of the operations. Their -purpose is to document the desired semantics of each operation, assuming -memory_order_seq_cst. This is essentially the code that will be called -if the front end calls out to compiler-rt. -

- -
-template <class T>
-T
-__atomic_load(T const volatile* obj)
-{
-    unique_lock<mutex> _(some_mutex);
-    return *obj;
-}
-
-template <class T>
-void
-__atomic_store(T volatile* obj, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    *obj = desr;
-}
-
-template <class T>
-T
-__atomic_exchange(T volatile* obj, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj = desr;
-    return r;
-}
-
-template <class T>
-bool
-__atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
-    {
-        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
-        return true;
-    }
-    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
-    return false;
-}
-
-// May spuriously return false (even if *obj == *exp)
-template <class T>
-bool
-__atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
-    {
-        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
-        return true;
-    }
-    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
-    return false;
-}
-
-template <class T>
-T
-__atomic_fetch_add(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj += operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_sub(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj -= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_and(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj &= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_or(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj |= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_xor(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj ^= operand;
-    return r;
-}
-
-void*
-__atomic_fetch_add(void* volatile* obj, ptrdiff_t operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    void* r = *obj;
-    (char*&)(*obj) += operand;
-    return r;
-}
-
-void*
-__atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    void* r = *obj;
-    (char*&)(*obj) -= operand;
-    return r;
-}
-
-void __atomic_thread_fence()
-{
-    unique_lock<mutex> _(some_mutex);
-}
-
-void __atomic_signal_fence()
-{
-    unique_lock<mutex> _(some_mutex);
-}
-
- - -
- - diff --git a/libcxx/www/atomic_design_b.html b/libcxx/www/atomic_design_b.html deleted file mode 100644 index d9f9c964b61ae..0000000000000 --- a/libcxx/www/atomic_design_b.html +++ /dev/null @@ -1,249 +0,0 @@ - - - - - - <atomic> design - - - - - - - -
- -

<atomic> design

- - -

-This is a variation of design A which puts the burden on the library to arrange -for the correct manipulation of the run time memory ordering arguments, and only -calls the compiler for well-defined memory orderings. I think of this design as -the worst of A and C, instead of the best of A and C. But I offer it as an -option in the spirit of completeness. -

- -
-// type must be trivially copyable
-bool __atomic_is_lock_free(const type* atomic_obj);
-
-// type must be trivially copyable
-type __atomic_load_relaxed(const volatile type* atomic_obj);
-type __atomic_load_consume(const volatile type* atomic_obj);
-type __atomic_load_acquire(const volatile type* atomic_obj);
-type __atomic_load_seq_cst(const volatile type* atomic_obj);
-
-// type must be trivially copyable
-type __atomic_store_relaxed(volatile type* atomic_obj, type desired);
-type __atomic_store_release(volatile type* atomic_obj, type desired);
-type __atomic_store_seq_cst(volatile type* atomic_obj, type desired);
-
-// type must be trivially copyable
-type __atomic_exchange_relaxed(volatile type* atomic_obj, type desired);
-type __atomic_exchange_consume(volatile type* atomic_obj, type desired);
-type __atomic_exchange_acquire(volatile type* atomic_obj, type desired);
-type __atomic_exchange_release(volatile type* atomic_obj, type desired);
-type __atomic_exchange_acq_rel(volatile type* atomic_obj, type desired);
-type __atomic_exchange_seq_cst(volatile type* atomic_obj, type desired);
-
-// type must be trivially copyable
-bool __atomic_compare_exchange_strong_relaxed_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_consume_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_consume_consume(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acquire_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acquire_consume(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acquire_acquire(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_release_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_release_consume(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_release_acquire(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acq_rel_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acq_rel_consume(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_acq_rel_acquire(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_seq_cst_relaxed(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_seq_cst_consume(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_seq_cst_acquire(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-bool __atomic_compare_exchange_strong_seq_cst_seq_cst(volatile type* atomic_obj,
-                                                      type* expected,
-                                                      type desired);
-
-// type must be trivially copyable
-bool __atomic_compare_exchange_weak_relaxed_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_consume_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_consume_consume(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acquire_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acquire_consume(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acquire_acquire(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_release_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_release_consume(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_release_acquire(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acq_rel_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acq_rel_consume(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_acq_rel_acquire(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_seq_cst_relaxed(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_seq_cst_consume(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_seq_cst_acquire(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-bool __atomic_compare_exchange_weak_seq_cst_seq_cst(volatile type* atomic_obj,
-                                                    type* expected,
-                                                    type desired);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-type __atomic_fetch_add_relaxed(volatile type* atomic_obj, type operand);
-type __atomic_fetch_add_consume(volatile type* atomic_obj, type operand);
-type __atomic_fetch_add_acquire(volatile type* atomic_obj, type operand);
-type __atomic_fetch_add_release(volatile type* atomic_obj, type operand);
-type __atomic_fetch_add_acq_rel(volatile type* atomic_obj, type operand);
-type __atomic_fetch_add_seq_cst(volatile type* atomic_obj, type operand);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-type __atomic_fetch_sub_relaxed(volatile type* atomic_obj, type operand);
-type __atomic_fetch_sub_consume(volatile type* atomic_obj, type operand);
-type __atomic_fetch_sub_acquire(volatile type* atomic_obj, type operand);
-type __atomic_fetch_sub_release(volatile type* atomic_obj, type operand);
-type __atomic_fetch_sub_acq_rel(volatile type* atomic_obj, type operand);
-type __atomic_fetch_sub_seq_cst(volatile type* atomic_obj, type operand);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-type __atomic_fetch_and_relaxed(volatile type* atomic_obj, type operand);
-type __atomic_fetch_and_consume(volatile type* atomic_obj, type operand);
-type __atomic_fetch_and_acquire(volatile type* atomic_obj, type operand);
-type __atomic_fetch_and_release(volatile type* atomic_obj, type operand);
-type __atomic_fetch_and_acq_rel(volatile type* atomic_obj, type operand);
-type __atomic_fetch_and_seq_cst(volatile type* atomic_obj, type operand);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-type __atomic_fetch_or_relaxed(volatile type* atomic_obj, type operand);
-type __atomic_fetch_or_consume(volatile type* atomic_obj, type operand);
-type __atomic_fetch_or_acquire(volatile type* atomic_obj, type operand);
-type __atomic_fetch_or_release(volatile type* atomic_obj, type operand);
-type __atomic_fetch_or_acq_rel(volatile type* atomic_obj, type operand);
-type __atomic_fetch_or_seq_cst(volatile type* atomic_obj, type operand);
-
-// type is one of: char, signed char, unsigned char, short, unsigned short, int,
-//      unsigned int, long, unsigned long, long long, unsigned long long,
-//      char16_t, char32_t, wchar_t
-type __atomic_fetch_xor_relaxed(volatile type* atomic_obj, type operand);
-type __atomic_fetch_xor_consume(volatile type* atomic_obj, type operand);
-type __atomic_fetch_xor_acquire(volatile type* atomic_obj, type operand);
-type __atomic_fetch_xor_release(volatile type* atomic_obj, type operand);
-type __atomic_fetch_xor_acq_rel(volatile type* atomic_obj, type operand);
-type __atomic_fetch_xor_seq_cst(volatile type* atomic_obj, type operand);
-
-void* __atomic_fetch_add_relaxed(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_add_consume(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_add_acquire(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_add_release(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_add_acq_rel(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_add_seq_cst(void* volatile* atomic_obj, ptrdiff_t operand);
-
-void* __atomic_fetch_sub_relaxed(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_sub_consume(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_sub_acquire(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_sub_release(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_sub_acq_rel(void* volatile* atomic_obj, ptrdiff_t operand);
-void* __atomic_fetch_sub_seq_cst(void* volatile* atomic_obj, ptrdiff_t operand);
-
-void __atomic_thread_fence_relaxed();
-void __atomic_thread_fence_consume();
-void __atomic_thread_fence_acquire();
-void __atomic_thread_fence_release();
-void __atomic_thread_fence_acq_rel();
-void __atomic_thread_fence_seq_cst();
-
-void __atomic_signal_fence_relaxed();
-void __atomic_signal_fence_consume();
-void __atomic_signal_fence_acquire();
-void __atomic_signal_fence_release();
-void __atomic_signal_fence_acq_rel();
-void __atomic_signal_fence_seq_cst();
-
- -
- - diff --git a/libcxx/www/atomic_design_c.html b/libcxx/www/atomic_design_c.html deleted file mode 100644 index eb7ea3d0aca4f..0000000000000 --- a/libcxx/www/atomic_design_c.html +++ /dev/null @@ -1,457 +0,0 @@ - - - - - - <atomic> design - - - - - - - -
- -

<atomic> design

- - -

-The <atomic> header is one of the most closely coupled headers to -the compiler. Ideally when you invoke any function from -<atomic>, it should result in highly optimized assembly being -inserted directly into your application ... assembly that is not otherwise -representable by higher level C or C++ expressions. The design of the libc++ -<atomic> header started with this goal in mind. A secondary, but -still very important goal is that the compiler should have to do minimal work to -facilitate the implementation of <atomic>. Without this second -goal, then practically speaking, the libc++ <atomic> header would -be doomed to be a barely supported, second class citizen on almost every -platform. -

- -

Goals:

- -
    -
  • Optimal code generation for atomic operations
  • -
  • Minimal effort for the compiler to achieve goal 1 on any given platform
  • -
  • Conformance to the C++0X draft standard
  • -
- -

-The purpose of this document is to inform compiler writers what they need to do -to enable a high performance libc++ <atomic> with minimal effort. -

- -

The minimal work that must be done for a conforming <atomic>

- -

-The only "atomic" operations that must actually be lock free in -<atomic> are represented by the following compiler intrinsics: -

- -
-__atomic_flag__
-__atomic_exchange_seq_cst(__atomic_flag__ volatile* obj, __atomic_flag__ desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    __atomic_flag__ result = *obj;
-    *obj = desr;
-    return result;
-}
-
-void
-__atomic_store_seq_cst(__atomic_flag__ volatile* obj, __atomic_flag__ desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    *obj = desr;
-}
-
- -

-Where: -

  • If __has_feature(__atomic_flag) evaluates to 1 in the preprocessor, then
    the compiler must define __atomic_flag__ (e.g. as a typedef to int).
  • If __has_feature(__atomic_flag) evaluates to 0 in the preprocessor, then
    the library defines __atomic_flag__ as a typedef to bool.
  • To communicate that the above intrinsics are available, the compiler must
    arrange for __has_feature to return 1 when fed the intrinsic name
    appended with an '_' and the mangled type name of __atomic_flag__.

    For example, if __atomic_flag__ is unsigned int:

    __has_feature(__atomic_flag) == 1
    __has_feature(__atomic_exchange_seq_cst_j) == 1
    __has_feature(__atomic_store_seq_cst_j) == 1

    typedef unsigned int __atomic_flag__;

    unsigned int __atomic_exchange_seq_cst(unsigned int volatile*, unsigned int)
    {
       // ...
    }

    void __atomic_store_seq_cst(unsigned int volatile*, unsigned int)
    {
       // ...
    }

-That's it! Compiler writers do the above and you've got a fully conforming
-(though sub-par-performing) <atomic> header!
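
For illustration, here is a minimal sketch of how the library can build the
standard atomic_flag operations on top of the two required intrinsics. This
is a hypothetical sketch, not the actual libc++ source; the member name
__flg_ is invented for the example.

    typedef struct atomic_flag {
        __atomic_flag__ __flg_;
    } atomic_flag;

    inline bool atomic_flag_test_and_set(volatile atomic_flag* __f)
    {
        // The previous value tells the caller whether the flag was already set.
        return __atomic_exchange_seq_cst(&__f->__flg_, __atomic_flag__(true)) != 0;
    }

    inline void atomic_flag_clear(volatile atomic_flag* __f)
    {
        __atomic_store_seq_cst(&__f->__flg_, __atomic_flag__(false));
    }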

Recommended work for a higher-performance <atomic>

-It would be good if the above intrinsics worked with all integral types plus
-void*. Because this may not be possible to do in a lock-free manner for all
-integral types on all platforms, a compiler must communicate each type that
-an intrinsic works with. For example, if __atomic_exchange_seq_cst works for
-all types except long long and unsigned long long, then:

-__has_feature(__atomic_exchange_seq_cst_b) == 1  // bool
-__has_feature(__atomic_exchange_seq_cst_c) == 1  // char
-__has_feature(__atomic_exchange_seq_cst_a) == 1  // signed char
-__has_feature(__atomic_exchange_seq_cst_h) == 1  // unsigned char
-__has_feature(__atomic_exchange_seq_cst_Ds) == 1 // char16_t
-__has_feature(__atomic_exchange_seq_cst_Di) == 1 // char32_t
-__has_feature(__atomic_exchange_seq_cst_w) == 1  // wchar_t
-__has_feature(__atomic_exchange_seq_cst_s) == 1  // short
-__has_feature(__atomic_exchange_seq_cst_t) == 1  // unsigned short
-__has_feature(__atomic_exchange_seq_cst_i) == 1  // int
-__has_feature(__atomic_exchange_seq_cst_j) == 1  // unsigned int
-__has_feature(__atomic_exchange_seq_cst_l) == 1  // long
-__has_feature(__atomic_exchange_seq_cst_m) == 1  // unsigned long
-__has_feature(__atomic_exchange_seq_cst_Pv) == 1 // void*
-
-Note that only the __has_feature flag is decorated with the argument type.
-The name of the compiler intrinsic is not decorated; instead, it works like
-a C++ overloaded function.
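
For instance, the per-type declarations might look like this (an
illustrative sketch; only the __has_feature spellings carry the
Itanium-mangled suffix):

    int   __atomic_exchange_seq_cst(int volatile* obj, int desr);     // announced as ..._i
    long  __atomic_exchange_seq_cst(long volatile* obj, long desr);   // announced as ..._l
    void* __atomic_exchange_seq_cst(void* volatile* obj, void* desr); // announced as ..._Pv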

-Additionally, there are other intrinsics besides __atomic_exchange_seq_cst
-and __atomic_store_seq_cst. They are optional. But if the compiler can
-generate faster code than the library provides, then clients will benefit
-from the compiler writer's expertise and knowledge of the targeted platform.

-Below is the complete list of sequentially consistent intrinsics and their
-library implementations. Template syntax is used to indicate the desired
-overloading for integral and void* types. The template does not represent a
-requirement that the intrinsic operate on any type!

-T is one of:  bool, char, signed char, unsigned char, short, unsigned short,
-              int, unsigned int, long, unsigned long,
-              long long, unsigned long long, char16_t, char32_t, wchar_t, void*
-
-template <class T>
-T
-__atomic_load_seq_cst(T const volatile* obj)
-{
-    unique_lock<mutex> _(some_mutex);
-    return *obj;
-}
-
-template <class T>
-void
-__atomic_store_seq_cst(T volatile* obj, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    *obj = desr;
-}
-
-template <class T>
-T
-__atomic_exchange_seq_cst(T volatile* obj, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj = desr;
-    return r;
-}
-
-template <class T>
-bool
-__atomic_compare_exchange_strong_seq_cst_seq_cst(T volatile* obj, T* exp, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    // Bytewise compare-and-exchange: compare *obj to *exp, and on a match
-    // store desr; on a mismatch, copy the observed value back into *exp.
-    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0)
-    {
-        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T));
-        return true;
-    }
-    std::memcpy(exp, const_cast<T*>(obj), sizeof(T));
-    return false;
-}
-
-template <class T>
-bool
-__atomic_compare_exchange_weak_seq_cst_seq_cst(T volatile* obj, T* exp, T desr)
-{
-    unique_lock<mutex> _(some_mutex);
-    // Identical to the strong form: under the mutex there is no reason to
-    // fail spuriously, which is a conforming implementation of "weak".
-    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0)
-    {
-        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T));
-        return true;
-    }
-    std::memcpy(exp, const_cast<T*>(obj), sizeof(T));
-    return false;
-}
-
-T is one of:  char, signed char, unsigned char, short, unsigned short,
-              int, unsigned int, long, unsigned long,
-              long long, unsigned long long, char16_t, char32_t, wchar_t
-
-template <class T>
-T
-__atomic_fetch_add_seq_cst(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj += operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_sub_seq_cst(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj -= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_and_seq_cst(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj &= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_or_seq_cst(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj |= operand;
-    return r;
-}
-
-template <class T>
-T
-__atomic_fetch_xor_seq_cst(T volatile* obj, T operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    T r = *obj;
-    *obj ^= operand;
-    return r;
-}
-
-void*
-__atomic_fetch_add_seq_cst(void* volatile* obj, ptrdiff_t operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    void* r = *obj;
-    (char*&)(*obj) += operand;  // advance the stored pointer by operand bytes
-    return r;
-}
-
-void*
-__atomic_fetch_sub_seq_cst(void* volatile* obj, ptrdiff_t operand)
-{
-    unique_lock<mutex> _(some_mutex);
-    void* r = *obj;
-    (char*&)(*obj) -= operand;  // back the stored pointer up by operand bytes
-    return r;
-}
-
-void __atomic_thread_fence_seq_cst()
-{
-    // In this lock-based fallback, acquiring and releasing some_mutex
-    // synchronizes with every other mutex-based operation above.
-    unique_lock<mutex> _(some_mutex);
-}
-
-void __atomic_signal_fence_seq_cst()
-{
-    unique_lock<mutex> _(some_mutex);
-}
-
-One should consult the (currently draft) C++ standard for the detailed
-definitions of these operations. For example,
-__atomic_compare_exchange_weak_seq_cst_seq_cst is allowed to fail spuriously
-while __atomic_compare_exchange_strong_seq_cst_seq_cst is not.

-If on your platform the lock-free definition of
-__atomic_compare_exchange_weak_seq_cst_seq_cst would be the same as
-__atomic_compare_exchange_strong_seq_cst_seq_cst, you may omit the
-__atomic_compare_exchange_weak_seq_cst_seq_cst intrinsic without a
-performance cost. The library will prefer your implementation of
-__atomic_compare_exchange_strong_seq_cst_seq_cst over its own definition for
-implementing __atomic_compare_exchange_weak_seq_cst_seq_cst. That is, the
-library will arrange for __atomic_compare_exchange_weak_seq_cst_seq_cst to
-call __atomic_compare_exchange_strong_seq_cst_seq_cst if you supply an
-intrinsic for the strong version but not the weak.
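
As a minimal sketch of that arrangement (hypothetical library-side glue, not
the actual libc++ source; unsigned int and its _j suffix are chosen purely
for the example):

    #if __has_feature(__atomic_compare_exchange_strong_seq_cst_seq_cst_j) && \
       !__has_feature(__atomic_compare_exchange_weak_seq_cst_seq_cst_j)
    inline bool
    __atomic_compare_exchange_weak_seq_cst_seq_cst(unsigned int volatile* obj,
                                                   unsigned int* exp,
                                                   unsigned int desr)
    {
        // Never failing spuriously is a conforming implementation of "weak".
        return __atomic_compare_exchange_strong_seq_cst_seq_cst(obj, exp, desr);
    }
    #endif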

Taking advantage of weaker memory synchronization

-So far, all of the intrinsics presented require sequentially consistent
-memory ordering. That is, no loads or stores can move across the operation
-(just as if the library had locked that internal mutex). But <atomic>
-supports weaker memory-ordering operations. In all, there are six memory
-orderings (listed here from strongest to weakest):

-memory_order_seq_cst
-memory_order_acq_rel
-memory_order_release
-memory_order_acquire
-memory_order_consume
-memory_order_relaxed
-
-(See the C++ standard for the detailed definitions of each of these
-orderings.)

-On some platforms, the compiler vendor can offer some or even all of the
-above intrinsics at one or more weaker levels of memory synchronization.
-This might lead, for example, to not issuing an mfence instruction on x86.

-If the compiler does not offer a given operation at a given memory-ordering
-level, the library will automatically attempt to call the next-strongest
-memory-ordering operation. This continues up to seq_cst, and if that doesn't
-exist either, the library takes over and does the job with a mutex. This is
-a compile-time search-and-selection operation. At run time, the application
-will only see the few inlined assembly instructions for the selected
-intrinsic.
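
A minimal sketch of that compile-time selection (hypothetical glue, not the
actual libc++ source; shown for an acquire load of unsigned int, whose only
valid stronger ordering is seq_cst):

    unsigned int
    __choose_load_acquire(unsigned int const volatile* obj)
    {
    #if __has_feature(__atomic_load_acquire_j)
        return __atomic_load_acquire(obj);   // exact ordering supplied
    #elif __has_feature(__atomic_load_seq_cst_j)
        return __atomic_load_seq_cst(obj);   // next-strongest supplied ordering
    #else
        unique_lock<mutex> _(some_mutex);    // final fallback: the library mutex
        return *obj;
    #endif
    }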

-Each intrinsic is appended with the 7-letter name of the memory ordering it
-addresses. For example, a load with relaxed ordering is defined by:

-T __atomic_load_relaxed(const volatile T* obj);
-
-And announced with:

-__has_feature(__atomic_load_relaxed_b) == 1  // bool
-__has_feature(__atomic_load_relaxed_c) == 1  // char
-__has_feature(__atomic_load_relaxed_a) == 1  // signed char
-...
-
-The __atomic_compare_exchange_strong(weak) intrinsics are parameterized on
-two memory orderings. The first ordering applies when the operation returns
-true, and the second applies when it returns false.
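
For example, one such declaration, spelled per this document's naming scheme
(illustrative; acq_rel_relaxed is one of the 16 supported combinations
listed below):

    // Succeeds with acq_rel ordering; on failure, the ordering is relaxed.
    template <class T>
    bool
    __atomic_compare_exchange_strong_acq_rel_relaxed(T volatile* obj, T* exp, T desr);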

-Not every memory ordering is appropriate for every operation. exchange and
-the fetch_op operations support all six. But load only supports relaxed,
-consume, acquire, and seq_cst. store only supports relaxed, release, and
-seq_cst. The compare_exchange operations support the following 16
-combinations out of the possible 36:

-relaxed_relaxed
-consume_relaxed
-consume_consume
-acquire_relaxed
-acquire_consume
-acquire_acquire
-release_relaxed
-release_consume
-release_acquire
-acq_rel_relaxed
-acq_rel_consume
-acq_rel_acquire
-seq_cst_relaxed
-seq_cst_consume
-seq_cst_acquire
-seq_cst_seq_cst
-
-Again, the compiler supplies intrinsics only for the strongest orderings
-where it can make a difference. The library takes care of calling the
-weakest supplied intrinsic that is as strong as or stronger than what the
-customer asked for.

diff --git a/libcxx/www/content.css b/libcxx/www/content.css
deleted file mode 100644
index dca6a32914369..0000000000000
--- a/libcxx/www/content.css
+++ /dev/null
@@ -1,27 +0,0 @@
-html { margin: 0px; } body { margin: 8px; }
-
-html, body {
-    padding:0px;
-    font-size:small; font-family:"Lucida Grande", "Lucida Sans Unicode", Arial, Verdana, Helvetica, sans-serif; background-color: #fff; color: #222;
-    line-height:1.5;
-}
-
-h1, h2, h3, tt { color: #000 }
-
-h1 { padding-top:0px; margin-top:0px;}
-h2 { color:#333333; padding-top:0.5em; }
-h3 { padding-top: 0.5em; margin-bottom: -0.25em; color:#2d58b7}
-li { padding-bottom: 0.5em; }
-ul { padding-left:1.5em; }
-
-/* Slides */
-IMG.img_slide {
-    display: block;
-    margin-left: auto;
-    margin-right: auto
-}
-
-.itemTitle { color:#2d58b7 }
-
-/* Tables */
-tr { vertical-align:top }
diff --git a/libcxx/www/cxx1y_status.html b/libcxx/www/cxx1y_status.html
deleted file mode 100644
index 5370b14babea6..0000000000000
--- a/libcxx/www/cxx1y_status.html
+++ /dev/null
@@ -1,14 +0,0 @@
-libc++ C++14 Status
-
-If you are not redirected automatically, follow this link to new documentation.
diff --git a/libcxx/www/cxx1z_status.html b/libcxx/www/cxx1z_status.html
deleted file mode 100644
index 20e717be35fde..0000000000000
--- a/libcxx/www/cxx1z_status.html
+++ /dev/null
@@ -1,14 +0,0 @@
-libc++ C++17 Status
-
-If you are not redirected automatically, follow this link to new documentation.
diff --git a/libcxx/www/cxx2a_status.html b/libcxx/www/cxx2a_status.html
deleted file mode 100644
index 4637d07de938a..0000000000000
--- a/libcxx/www/cxx2a_status.html
+++ /dev/null
@@ -1,14 +0,0 @@
-libc++ C++20 Status
-
-If you are not redirected automatically, follow this link to new documentation.
diff --git a/libcxx/www/index.html b/libcxx/www/index.html
deleted file mode 100644
index cb94eb8b2e5a1..0000000000000
--- a/libcxx/www/index.html
+++ /dev/null
@@ -1,234 +0,0 @@

"libc++" C++ Standard Library

libc++ is an implementation of the C++ standard library, targeting C++11,
C++14, and above.

All of the code in libc++ is dual-licensed under the MIT license and the
UIUC License (a BSD-like license).

New Documentation Coming Soon!

Looking for documentation on how to use, build, and test libc++? If so,
check out the new libc++ documentation.

Click here for the new libc++ documentation.

Features and Goals

  • Correctness as defined by the C++11 standard.
  • Fast execution.
  • Minimal memory use.
  • Fast compile times.
  • ABI compatibility with gcc's libstdc++ for some low-level features such
    as exception objects, RTTI, and memory allocation.
  • Extensive unit tests.
Why a new C++ Standard Library for C++11?

After its initial introduction, many people asked, "why start a new library
instead of contributing to an existing library?" (like Apache's libstdcxx,
GNU's libstdc++, STLport, etc.). There are many contributing reasons, but
some of the major ones are:

  • From years of experience (including having implemented the standard
    library before), we've learned many things about implementing the
    standard containers that require ABI breakage and fundamental changes to
    how they are implemented. For example, it is generally accepted that
    building std::string using the "short string optimization" instead of
    using Copy On Write (COW) is a superior approach for multicore machines
    (particularly in C++11, which has rvalue references). Breaking ABI
    compatibility with old versions of the library was determined to be
    critical to achieving the performance goals of libc++.

  • Mainline libstdc++ has switched to GPL3, a license the developers of
    libc++ cannot use. libstdc++ 4.2 (the last GPL2 version) could be
    independently extended to support C++11, but this would be a fork of the
    codebase (which is often seen as worse for a project than starting a new
    independent one). Another problem with libstdc++ is that it is tightly
    integrated with G++ development, tending to be tied fairly closely to
    the matching version of G++.

  • STLport and the Apache libstdcxx library are two other popular
    candidates, but both lack C++11 support. Our experience (and the
    experience of libstdc++ developers) is that adding support for C++11 (in
    particular rvalue references and move-only types) requires changes to
    almost every class and function, essentially amounting to a rewrite.
    Faced with a rewrite, we decided to start from scratch and evaluate
    every design decision from first principles based on experience.

    Further, both projects are apparently abandoned: STLport 5.2.1 was
    released in Oct '08, and STDCXX 4.2.1 in May '08.
Platform Support

libc++ is known to work on the following platforms, using g++ and clang.
Note that the functionality provided by <atomic> works only with clang.

  • Mac OS X i386
  • Mac OS X x86_64
  • FreeBSD 10+ i386
  • FreeBSD 10+ x86_64
  • FreeBSD 10+ ARM
  • Linux i386
  • Linux x86_64

The library also supports Windows (both MSVC-style environments, built with
clang-cl, and MinGW environments), although support for Windows is less
mature than for the platforms listed above.

Current Status

libc++ is a 100% complete C++11 implementation on Apple's OS X.

LLVM and Clang can self-host in C++ and C++11 mode with libc++ on Linux.

libc++ is also a 100% complete C++14 implementation. A list of new features
and changes for C++14 can be found here.

libc++'s C++17 implementation is not yet complete. A list of features and
changes for C++17 can be found here.

A list of features and changes for the next C++ standard, known here as
"C++2a" (probably to be C++20), can be found here.

Implementation of the post-C++14 Technical Specifications is in progress. A
list of features and the current status of these features can be found here.

As features get moved from the Technical Specifications into the main
standard, we will (after a period for migration) remove them from the TS
implementation. This process is detailed here.

Build Bots

The latest libc++ build results can be found at the following locations.

Get it and get involved!

First, please review our Developer's Policy. The documentation for building
and using libc++ can be found below.

Notes and Known Issues

  • Building libc++ with -fno-rtti is not supported. However, linking
    against it with -fno-rtti is supported.

Send discussions to the libc++ mailing list.

Bug reports and patches

If you think you've found a bug in libc++, please report it using the LLVM
Bugzilla. If you're not sure, you can post a message to the libcxx-dev
mailing list or on IRC.

If you want to contribute a patch to libc++, the best place for that is
Phabricator. Please add libcxx-commits as a subscriber.

Design Documents

diff --git a/libcxx/www/menu.css b/libcxx/www/menu.css
deleted file mode 100644
index 4a887b1907a32..0000000000000
--- a/libcxx/www/menu.css
+++ /dev/null
@@ -1,39 +0,0 @@
-/***************/
-/* page layout */
-/***************/
-
-[id=menu] {
-    position:fixed;
-    width:25ex;
-}
-[id=content] {
-    /* ***** EDIT THIS VALUE IF CONTENT OVERLAPS MENU ***** */
-    position:absolute;
-    left:29ex;
-    padding-right:4ex;
-}
-
-/**************/
-/* menu style */
-/**************/
-
-#menu .submenu {
-    padding-top:1em;
-    display:block;
-}
-
-#menu label {
-    display:block;
-    font-weight: bold;
-    text-align: center;
-    background-color: rgb(192,192,192);
-}
-#menu a {
-    padding:0 .2em;
-    display:block;
-    text-align: center;
-    background-color: rgb(235,235,235);
-}
-#menu a:visited {
-    color:rgb(100,50,100);
-}
diff --git a/libcxx/www/ts1z_status.html b/libcxx/www/ts1z_status.html
deleted file mode 100644
index 1cf448bd673af..0000000000000
--- a/libcxx/www/ts1z_status.html
+++ /dev/null
@@ -1,108 +0,0 @@

libc++ Fundamentals TS Status

Post-C++14 TS Implementation Status

In November 2014, the C++ standard committee approved the draft for the next
version of the C++ standard, known as "C++1z" (probably to be C++17).

In addition, there are several "Technical Specifications" consisting of new
features that have been proposed but not yet accepted for C++1z.

This page shows the status of libc++; the status of clang's support for the
language features is here.

Technical Specifications

Paper Number | Paper Title                                        | TS
4023         | C++ Extensions for Library Fundamentals            | Library Fundamentals 1
3940         | Technical Specification - File System              | File System
4273         | Uniform Container Erasure                          | Library Fundamentals 2
4061         | Greatest Common Divisor and Least Common Multiple  | Library Fundamentals 2
4257         | Delimited iterators                                | Library Fundamentals 2
4282         | The World's Dumbest Smart Pointer                  | Library Fundamentals 2

Features in Library Fundamentals 1

Feature Name                                        | Status
Uses-allocator construction                         | Not started
Changes to std::shared_ptr and weak_ptr             | Not started
Additions to std::function                          | Not started
Additions to std::promise                           | Not started
Additions to std::packaged_task                     | Not started
Class erased_type                                   | Complete
Calling a function with a tuple of arguments        | Complete
Type traits (_v)                                    | Complete
Other type transformations                          | Not started
Compile-time Rational Arithmetic                    | Implementation in progress
Time Utilities                                      | Complete
System Error Support                                | Complete
Class memory_resource                               | Complete
Class template polymorphic_allocator                | Complete
Template alias resource_adaptor                     | Complete
Global memory resources                             | Complete
Pool resource classes                               | Implementation in progress
Class monotonic_buffer_resource                     | Implementation in progress
Alias templates using polymorphic memory resource   | Complete
Searchers                                           | Complete
Optional Objects                                    | Initial implementation complete
class any                                           | Complete
string_view                                         | Complete
memory                                              | Implementation in progress
Algorithms library                                  | Complete

Features in Library Fundamentals 2

Feature Name | Status

Features in Filesystem

Feature Name | Status   | First released version
All features | Complete | 3.9

Last Updated: 17-June-2016

diff --git a/libcxx/www/type_traits_design.html b/libcxx/www/type_traits_design.html
deleted file mode 100644
index acbef071cc55f..0000000000000
--- a/libcxx/www/type_traits_design.html
+++ /dev/null
@@ -1,285 +0,0 @@

Type traits intrinsic design

This is a survey of the type traits intrinsics clang has and those needed.
The names and definitions of several of the needed type traits have recently
changed. Please see N3142.

Legend (the original page distinguished these rows by color):
  clang supplies it and it is absolutely necessary        | some_trait(T)
  clang supplies it and it is useful                      | some_trait(T)
  clang supplies it and it is not needed                  | some_trait(T)
  clang does not supply it and it is not needed           |
  clang does not supply it and it is absolutely necessary | some_trait(T)

Needed type traits vs clang type traits

libc++ Needs                               | clang Has
is_union<T>                                | __is_union(T)
is_class<T>                                | __is_class(T)
is_enum<T>                                 | __is_enum(T)
is_pod<T>                                  | __is_pod(T)
has_virtual_destructor<T>                  | __has_virtual_destructor(T)
is_constructible<T, Args...>               |
is_default_constructible<T>                |
is_copy_constructible<T>                   |
is_move_constructible<T>                   |
is_assignable<T, U>                        |
is_copy_assignable<T>                      |
is_move_assignable<T>                      |
is_destructible<T>                         |
is_trivially_constructible<T, Args...>     | __is_trivially_constructible(T, U)
is_trivially_default_constructible<T>      | __has_trivial_constructor(T)
is_trivially_copy_constructible<T>         | __has_trivial_copy(T)
is_trivially_move_constructible<T>         |
is_trivially_assignable<T, U>              | __is_trivially_assignable(T, U)
is_trivially_copy_assignable<T>            | __has_trivial_assign(T)
is_trivially_move_assignable<T>            |
is_trivially_destructible<T>               | __has_trivial_destructor(T)
is_nothrow_constructible<T, Args...>       |
is_nothrow_default_constructible<T>        | __has_nothrow_constructor(T)
is_nothrow_copy_constructible<T>           | __has_nothrow_copy(T)
is_nothrow_move_constructible<T>           |
is_nothrow_assignable<T, U>                |
is_nothrow_copy_assignable<T>              | __has_nothrow_assign(T)
is_nothrow_move_assignable<T>              |
is_nothrow_destructible<T>                 |
is_trivial<T>                              | __is_trivial(T)
is_trivially_copyable<T>                   | __is_trivially_copyable(T)
is_standard_layout<T>                      | __is_standard_layout(T)
is_literal_type<T>                         | __is_literal_type(T)
is_convertible<T, U>                       | __is_convertible_to(T, U)
is_base_of<T, U>                           | __is_base_of(T, U)
underlying_type<T>                         | __underlying_type(T)
is_polymorphic<T>                          | __is_polymorphic(T)
is_empty<T>                                | __is_empty(T)
is_abstract<T>                             | __is_abstract(T)
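
Where clang has an intrinsic, the library typically dispatches to it, and
where it does not, a pure-library fallback is used when one exists. A
minimal sketch of that pattern (illustrative only, not the actual libc++
source), using is_empty, whose classic library fallback checks whether
deriving from T adds storage:

    #if __has_feature(is_empty)
    template <class T>
    struct is_empty : integral_constant<bool, __is_empty(T)> {};
    #else
    // Library fallback: T is empty iff a class derived from it is no larger
    // than a class holding the padding member alone. (A real implementation
    // would also guard against non-class types.)
    template <class T> struct __is_empty_helper : public T { double __pad_; };
    struct __is_empty_base { double __pad_; };
    template <class T>
    struct is_empty : integral_constant<bool,
        sizeof(__is_empty_helper<T>) == sizeof(__is_empty_base)> {};
    #endif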
diff --git a/libcxx/www/upcoming_meeting.html b/libcxx/www/upcoming_meeting.html
deleted file mode 100644
index cff94d096aa00..0000000000000
--- a/libcxx/www/upcoming_meeting.html
+++ /dev/null
@@ -1,133 +0,0 @@

libc++ Upcoming Meeting Status

This is a temporary page; please check the C++2a status here.

This page shows the status of the papers and issues that are expected to be
adopted at the next WG21 meeting.

The groups that have contributed papers:

  • LWG - Library Working Group
  • CWG - Core Language Working Group
  • SG1 - Study Group #1 (Concurrency Working Group)

Paper Status

Paper # | Group | Paper Name | Meeting | Status

Library Working Group Issues Status

Issue # | Issue Name                                                                                | Meeting
3231    | year_month_day_last::day specification does not cover !ok() values                       | Belfast
3225    | zoned_time converting constructor shall not be noexcept                                  | Belfast
3190    | std::allocator::allocate sometimes returns too little storage                            | Belfast
3218    | Modifier for %d parse flag does not match POSIX and format specification                 | Belfast
3224    | zoned_time constructor from TimeZonePtr does not specify initialization of tp_           | Belfast
3230    | Format specifier %y/%Y is missing locale alternative versions                            | Belfast
3232    | Inconsistency in zoned_time deduction guides                                             | Belfast
3222    | P0574R1 introduced preconditions on non-existent parameters                              | Belfast
3221    | Result of year_month arithmetic with months is ambiguous                                 | Belfast
3235    | parse manipulator without abbreviation is not callable                                   | Belfast
3246    | What are the constraints on the template parameter of basic_format_arg?                  | Belfast
3253    | basic_syncbuf::basic_syncbuf() should not be explicit                                    | Belfast
3245    | Unnecessary restriction on '%p' parse specifier                                          | Belfast
3244    | Constraints for Source in §[fs.path.req] insufficiently constrainty                      | Belfast
3241    | chrono-spec grammar ambiguity in §[time.format]                                          | Belfast
3257    | Missing feature testing macro update from P0858                                          | Belfast
3256    | Feature testing macro for constexpr algorithms                                           | Belfast
3273    | Specify weekday_indexed to range of [0, 7]                                               | Belfast
3070    | path::lexically_relative causes surprising results if a filename can also be a root-name | Belfast
3266    | to_chars(bool) should be deleted                                                         | Belfast
3272    | %I%p should parse/format duration since midnight                                         | Belfast
3259    | The definition of constexpr iterators should be adjusted                                 | Belfast
3103    | Errors in taking subview of span should be ill-formed where possible                     | Belfast
3274    | Missing feature test macro for <span>                                                    | Belfast
3276    | Class split_view::outer_iterator::value_type should inherit from view_interface          | Belfast
3277    | Pre-increment on prvalues is not a requirement of weakly_incrementable                   | Belfast
3149    | DefaultConstructible should require default initialization                               | Belfast

Issues to "Review"

Issue # | Issue Name | Meeting | Status

Comments about the papers


Comments about the issues

  • 3231
  • 3225
  • 3190
  • 3218
  • 3224
  • 3230
  • 3232
  • 3222
  • 3221
  • 3235
  • 3246
  • 3253
  • 3245
  • 3244
  • 3241
  • 3257
  • 3256
  • 3273
  • 3070
  • 3266
  • 3272
  • 3259
  • 3103
  • 3274
  • 3276
  • 3277
  • 3149

Last Updated: 22-Oct-2019
