Import Chromium sources r15462

commit 6545af0f08901467cf89dc99eb8eebcb3f354f38 1 parent 8b1eabc
@bsmedberg bsmedberg authored
Showing with 30,222 additions and 0 deletions.
  1. +6 −0 ipc/chromium/src/base/DEPS
  2. +67 −0 ipc/chromium/src/base/at_exit.cc
  3. +71 −0 ipc/chromium/src/base/at_exit.h
  4. +85 −0 ipc/chromium/src/base/at_exit_unittest.cc
  5. +63 −0 ipc/chromium/src/base/atomic_ref_count.h
  6. +30 −0 ipc/chromium/src/base/atomic_sequence_num.h
  7. +139 −0 ipc/chromium/src/base/atomicops.h
  8. +124 −0 ipc/chromium/src/base/atomicops_internals_arm_gcc.h
  9. +104 −0 ipc/chromium/src/base/atomicops_internals_x86_gcc.cc
  10. +248 −0 ipc/chromium/src/base/atomicops_internals_x86_gcc.h
  11. +279 −0 ipc/chromium/src/base/atomicops_internals_x86_macosx.h
  12. +167 −0 ipc/chromium/src/base/atomicops_internals_x86_msvc.h
  13. +237 −0 ipc/chromium/src/base/atomicops_unittest.cc
  14. +724 −0 ipc/chromium/src/base/base.gyp
  15. +60 −0 ipc/chromium/src/base/base_drag_source.cc
  16. +46 −0 ipc/chromium/src/base/base_drag_source.h
  17. +167 −0 ipc/chromium/src/base/base_drop_target.cc
  18. +130 −0 ipc/chromium/src/base/base_drop_target.h
  19. +38 −0 ipc/chromium/src/base/base_paths.cc
  20. +35 −0 ipc/chromium/src/base/base_paths.h
  21. +46 −0 ipc/chromium/src/base/base_paths_linux.cc
  22. +30 −0 ipc/chromium/src/base/base_paths_linux.h
  23. +31 −0 ipc/chromium/src/base/base_paths_mac.h
  24. +58 −0 ipc/chromium/src/base/base_paths_mac.mm
  25. +118 −0 ipc/chromium/src/base/base_paths_win.cc
  26. +42 −0 ipc/chromium/src/base/base_paths_win.h
  27. +43 −0 ipc/chromium/src/base/base_switches.cc
  28. +23 −0 ipc/chromium/src/base/base_switches.h
  29. +335 −0 ipc/chromium/src/base/basictypes.h
  30. +8 −0 ipc/chromium/src/base/build/base.vsprops
  31. +8 −0 ipc/chromium/src/base/build/base_gfx.vsprops
  32. +8 −0 ipc/chromium/src/base/build/base_unittests.vsprops
  33. +12 −0 ipc/chromium/src/base/bzip2_error_handler.cc
  34. +64 −0 ipc/chromium/src/base/clipboard.cc
  35. +204 −0 ipc/chromium/src/base/clipboard.h
  36. +328 −0 ipc/chromium/src/base/clipboard_linux.cc
  37. +283 −0 ipc/chromium/src/base/clipboard_mac.mm
  38. +282 −0 ipc/chromium/src/base/clipboard_unittest.cc
  39. +488 −0 ipc/chromium/src/base/clipboard_util.cc
  40. +63 −0 ipc/chromium/src/base/clipboard_util.h
  41. +650 −0 ipc/chromium/src/base/clipboard_win.cc
  42. +362 −0 ipc/chromium/src/base/command_line.cc
  43. +185 −0 ipc/chromium/src/base/command_line.h
  44. +128 −0 ipc/chromium/src/base/command_line_unittest.cc
  45. +77 −0 ipc/chromium/src/base/compiler_specific.h
  46. +174 −0 ipc/chromium/src/base/condition_variable.h
  47. +61 −0 ipc/chromium/src/base/condition_variable_posix.cc
  48. +738 −0 ipc/chromium/src/base/condition_variable_unittest.cc
  49. +446 −0 ipc/chromium/src/base/condition_variable_win.cc
  50. +54 −0 ipc/chromium/src/base/cpu.cc
  51. +42 −0 ipc/chromium/src/base/cpu.h
  52. +72 −0 ipc/chromium/src/base/crypto/cssm_init.cc
  53. +17 −0 ipc/chromium/src/base/crypto/cssm_init.h
  54. +105 −0 ipc/chromium/src/base/crypto/signature_verifier.h
  55. +143 −0 ipc/chromium/src/base/crypto/signature_verifier_mac.cc
  56. +114 −0 ipc/chromium/src/base/crypto/signature_verifier_nss.cc
  57. +268 −0 ipc/chromium/src/base/crypto/signature_verifier_unittest.cc
  58. +148 −0 ipc/chromium/src/base/crypto/signature_verifier_win.cc
  59. BIN  ipc/chromium/src/base/data/data_pack_unittest/sample.pak
  60. BIN  ipc/chromium/src/base/data/file_util_unittest/binary_file.bin
  61. BIN  ipc/chromium/src/base/data/file_util_unittest/binary_file_diff.bin
  62. BIN  ipc/chromium/src/base/data/file_util_unittest/binary_file_same.bin
  63. +1 −0  ipc/chromium/src/base/data/file_util_unittest/different.txt
  64. +1 −0  ipc/chromium/src/base/data/file_util_unittest/different_first.txt
  65. +1 −0  ipc/chromium/src/base/data/file_util_unittest/different_last.txt
  66. 0  ipc/chromium/src/base/data/file_util_unittest/empty1.txt
  67. 0  ipc/chromium/src/base/data/file_util_unittest/empty2.txt
  68. +1 −0  ipc/chromium/src/base/data/file_util_unittest/original.txt
  69. +1 −0  ipc/chromium/src/base/data/file_util_unittest/same.txt
  70. +1 −0  ipc/chromium/src/base/data/file_util_unittest/same_length.txt
  71. +1 −0  ipc/chromium/src/base/data/file_util_unittest/shortened.txt
  72. BIN  ipc/chromium/src/base/data/file_version_info_unittest/FileVersionInfoTest1.dll
  73. BIN  ipc/chromium/src/base/data/file_version_info_unittest/FileVersionInfoTest2.dll
  74. +30 −0 ipc/chromium/src/base/data/purify/base_unittests.exe.gtest.txt
  75. +97 −0 ipc/chromium/src/base/data/purify/base_unittests.exe_MLK.txt
  76. +18 −0 ipc/chromium/src/base/data/purify/base_unittests.exe_MLK_flakey.txt
  77. +93 −0 ipc/chromium/src/base/data/purify/base_unittests.exe_MLK_ignore.txt
  78. +8 −0 ipc/chromium/src/base/data/purify/base_unittests.exe_PAR_ignore.txt
  79. 0  ipc/chromium/src/base/data/purify/base_unittests.exe_UMR.txt
  80. +8 −0 ipc/chromium/src/base/data/valgrind/base_unittests.gtest.txt
  81. +115 −0 ipc/chromium/src/base/data_pack.cc
  82. +48 −0 ipc/chromium/src/base/data_pack.h
  83. +41 −0 ipc/chromium/src/base/data_pack_unittest.cc
  84. +17 −0 ipc/chromium/src/base/debug_message.cc
  85. +65 −0 ipc/chromium/src/base/debug_on_start.cc
  86. +67 −0 ipc/chromium/src/base/debug_on_start.h
  87. +26 −0 ipc/chromium/src/base/debug_util.cc
  88. +70 −0 ipc/chromium/src/base/debug_util.h
  89. +35 −0 ipc/chromium/src/base/debug_util_mac.cc
  90. +156 −0 ipc/chromium/src/base/debug_util_posix.cc
  91. +77 −0 ipc/chromium/src/base/debug_util_unittest.cc
  92. +289 −0 ipc/chromium/src/base/debug_util_win.cc
  93. +55 −0 ipc/chromium/src/base/directory_watcher.h
  94. +321 −0 ipc/chromium/src/base/directory_watcher_inotify.cc
  95. +121 −0 ipc/chromium/src/base/directory_watcher_mac.cc
  96. +20 −0 ipc/chromium/src/base/directory_watcher_stub.cc
  97. +406 −0 ipc/chromium/src/base/directory_watcher_unittest.cc
  98. +82 −0 ipc/chromium/src/base/directory_watcher_win.cc
  99. +33 −0 ipc/chromium/src/base/eintr_wrapper.h
  100. +259 −0 ipc/chromium/src/base/event_recorder.cc
  101. +102 −0 ipc/chromium/src/base/event_recorder.h
  102. +28 −0 ipc/chromium/src/base/event_recorder_stubs.cc
  103. +113 −0 ipc/chromium/src/base/field_trial.cc
  104. +192 −0 ipc/chromium/src/base/field_trial.h
  105. +116 −0 ipc/chromium/src/base/field_trial_unittest.cc
  106. +36 −0 ipc/chromium/src/base/file_descriptor_posix.h
  107. +80 −0 ipc/chromium/src/base/file_descriptor_shuffle.cc
  108. +76 −0 ipc/chromium/src/base/file_descriptor_shuffle.h
  109. +289 −0 ipc/chromium/src/base/file_descriptor_shuffle_unittest.cc
  110. +318 −0 ipc/chromium/src/base/file_path.cc
  111. +268 −0 ipc/chromium/src/base/file_path.h
  112. +534 −0 ipc/chromium/src/base/file_path_unittest.cc
  113. +429 −0 ipc/chromium/src/base/file_util.cc
  114. +522 −0 ipc/chromium/src/base/file_util.h
  115. +90 −0 ipc/chromium/src/base/file_util_icu.cc
  116. +78 −0 ipc/chromium/src/base/file_util_linux.cc
  117. +33 −0 ipc/chromium/src/base/file_util_mac.mm
  118. +642 −0 ipc/chromium/src/base/file_util_posix.cc
  119. +1,096 −0 ipc/chromium/src/base/file_util_unittest.cc
  120. +817 −0 ipc/chromium/src/base/file_util_win.cc
  121. +185 −0 ipc/chromium/src/base/file_version_info.cc
  122. +96 −0 ipc/chromium/src/base/file_version_info.h
  123. +86 −0 ipc/chromium/src/base/file_version_info_linux.cc
  124. +26 −0 ipc/chromium/src/base/file_version_info_linux.h.version
  125. +131 −0 ipc/chromium/src/base/file_version_info_mac.mm
  126. +134 −0 ipc/chromium/src/base/file_version_info_unittest.cc
  127. +75 −0 ipc/chromium/src/base/fix_wp64.h
  128. +25 −0 ipc/chromium/src/base/float_util.h
  129. +37 −0 ipc/chromium/src/base/foundation_utils_mac.h
  130. +4 −0 ipc/chromium/src/base/gfx/DEPS
  131. +79 −0 ipc/chromium/src/base/gfx/gdi_util.cc
  132. +36 −0 ipc/chromium/src/base/gfx/gdi_util.h
  133. +145 −0 ipc/chromium/src/base/gfx/gtk_native_view_id_manager.cc
  134. +91 −0 ipc/chromium/src/base/gfx/gtk_native_view_id_manager.h
  135. +30 −0 ipc/chromium/src/base/gfx/gtk_util.cc
  136. +57 −0 ipc/chromium/src/base/gfx/gtk_util.h
  137. +523 −0 ipc/chromium/src/base/gfx/jpeg_codec.cc
  138. +59 −0 ipc/chromium/src/base/gfx/jpeg_codec.h
  139. +147 −0 ipc/chromium/src/base/gfx/jpeg_codec_unittest.cc
  140. +710 −0 ipc/chromium/src/base/gfx/native_theme.cc
  141. +296 −0 ipc/chromium/src/base/gfx/native_theme.h
  142. +11 −0 ipc/chromium/src/base/gfx/native_theme_unittest.cc
  143. +112 −0 ipc/chromium/src/base/gfx/native_widget_types.h
  144. +16 −0 ipc/chromium/src/base/gfx/native_widget_types_gtk.cc
  145. +7 −0 ipc/chromium/src/base/gfx/platform_canvas.h
  146. +12 −0 ipc/chromium/src/base/gfx/platform_canvas_linux.h
  147. +12 −0 ipc/chromium/src/base/gfx/platform_canvas_mac.h
  148. +12 −0 ipc/chromium/src/base/gfx/platform_device_linux.h
  149. +12 −0 ipc/chromium/src/base/gfx/platform_device_mac.h
  150. +202 −0 ipc/chromium/src/base/gfx/png_codec_unittest.cc
  151. +354 −0 ipc/chromium/src/base/gfx/png_decoder.cc
  152. +63 −0 ipc/chromium/src/base/gfx/png_decoder.h
  153. +205 −0 ipc/chromium/src/base/gfx/png_encoder.cc
  154. +68 −0 ipc/chromium/src/base/gfx/png_encoder.h
  155. +44 −0 ipc/chromium/src/base/gfx/point.cc
  156. +77 −0 ipc/chromium/src/base/gfx/point.h
  157. +226 −0 ipc/chromium/src/base/gfx/rect.cc
  158. +162 −0 ipc/chromium/src/base/gfx/rect.h
  159. +280 −0 ipc/chromium/src/base/gfx/rect_unittest.cc
  160. +53 −0 ipc/chromium/src/base/gfx/size.cc
  161. +76 −0 ipc/chromium/src/base/gfx/size.h
  162. +107 −0 ipc/chromium/src/base/hash_tables.h
  163. +794 −0 ipc/chromium/src/base/histogram.cc
  164. +558 −0 ipc/chromium/src/base/histogram.h
  165. +293 −0 ipc/chromium/src/base/histogram_unittest.cc
  166. +56 −0 ipc/chromium/src/base/hmac.h
  167. +69 −0 ipc/chromium/src/base/hmac_mac.cc
  168. +134 −0 ipc/chromium/src/base/hmac_nss.cc
  169. +164 −0 ipc/chromium/src/base/hmac_unittest.cc
  170. +131 −0 ipc/chromium/src/base/hmac_win.cc
  171. +240 −0 ipc/chromium/src/base/iat_patch.cc
  172. +122 −0 ipc/chromium/src/base/iat_patch.h
  173. +92 −0 ipc/chromium/src/base/icu_util.cc
  174. +16 −0 ipc/chromium/src/base/icu_util.h
  175. +93 −0 ipc/chromium/src/base/id_map.h
  176. +161 −0 ipc/chromium/src/base/idle_timer.cc
  177. +97 −0 ipc/chromium/src/base/idle_timer.h
  178. +25 −0 ipc/chromium/src/base/idle_timer_none.cc
  179. +240 −0 ipc/chromium/src/base/idletimer_unittest.cc
  180. +72 −0 ipc/chromium/src/base/image_util.cc
  181. +66 −0 ipc/chromium/src/base/image_util.h
  182. +641 −0 ipc/chromium/src/base/json_reader.cc
  183. +186 −0 ipc/chromium/src/base/json_reader.h
  184. +492 −0 ipc/chromium/src/base/json_reader_unittest.cc
  185. +175 −0 ipc/chromium/src/base/json_writer.cc
  186. +47 −0 ipc/chromium/src/base/json_writer.h
  187. +68 −0 ipc/chromium/src/base/json_writer_unittest.cc
  188. +16 −0 ipc/chromium/src/base/keyboard_codes.h
  189. +208 −0 ipc/chromium/src/base/keyboard_codes_posix.h
  190. +184 −0 ipc/chromium/src/base/keyboard_codes_win.h
  191. +34 −0 ipc/chromium/src/base/lazy_instance.cc
  192. +110 −0 ipc/chromium/src/base/lazy_instance.h
  193. +100 −0 ipc/chromium/src/base/lazy_instance_unittest.cc
  194. +174 −0 ipc/chromium/src/base/linked_ptr.h
  195. +110 −0 ipc/chromium/src/base/linked_ptr_unittest.cc
  196. +31 −0 ipc/chromium/src/base/linux_util.cc
  197. +19 −0 ipc/chromium/src/base/linux_util.h
  198. +7 −0 ipc/chromium/src/base/lock.cc
  199. +75 −0 ipc/chromium/src/base/lock.h
  200. +77 −0 ipc/chromium/src/base/lock_impl.h
  201. +48 −0 ipc/chromium/src/base/lock_impl_posix.cc
  202. +73 −0 ipc/chromium/src/base/lock_impl_win.cc
  203. +568 −0 ipc/chromium/src/base/logging.cc
  204. +644 −0 ipc/chromium/src/base/logging.h
  205. +39 −0 ipc/chromium/src/base/mac_util.h
  206. +69 −0 ipc/chromium/src/base/mac_util.mm
  207. +20 −0 ipc/chromium/src/base/mac_util_unittest.cc
Sorry, we could not display the entire diff because too many files (783) changed.
6 ipc/chromium/src/base/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+third_party/zlib",
+ "+third_party/libevent",
+ "+third_party/libjpeg",
+ "+third_party/dmg_fp",
+]
67 ipc/chromium/src/base/at_exit.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers. We always operate on the most
+// recent, and we should never have more than one outside of testing (tests
+// create extras via the shadow constructor). We don't protect this for
+// thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = NULL;
+
+AtExitManager::AtExitManager() : next_manager_(NULL) {
+ DCHECK(!g_top_manager);
+ g_top_manager = this;
+}
+
+AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+ DCHECK(shadow || !g_top_manager);
+ g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+ return;
+ }
+ DCHECK(g_top_manager == this);
+
+ ProcessCallbacksNow();
+ g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+ return;
+ }
+
+ DCHECK(func);
+
+ AutoLock lock(g_top_manager->lock_);
+ g_top_manager->stack_.push(CallbackAndParam(func, param));
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+ return;
+ }
+
+ AutoLock lock(g_top_manager->lock_);
+
+ while (!g_top_manager->stack_.empty()) {
+ CallbackAndParam callback_and_param = g_top_manager->stack_.top();
+ g_top_manager->stack_.pop();
+
+ callback_and_param.func_(callback_and_param.param_);
+ }
+}
+
+} // namespace base
71 ipc/chromium/src/base/at_exit.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include <stack>
+
+#include "base/basictypes.h"
+#include "base/lock.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+// base::AtExitManager exit_manager;
+//
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class AtExitManager {
+ protected:
+ // This constructor will allow this instance of AtExitManager to be created
+ // even if one already exists. This should only be used for testing!
+ // AtExitManagers are kept on a global stack, and this instance will be
+ // removed from that stack during destruction. This allows you to shadow
+ // another AtExitManager.
+ AtExitManager(bool shadow);
+
+ public:
+ typedef void (*AtExitCallbackType)(void*);
+
+ AtExitManager();
+
+ // The dtor calls all the registered callbacks. Do not try to register more
+ // callbacks after this point.
+ ~AtExitManager();
+
+ // Registers the specified function to be called at exit. The prototype of
+ // the callback function is void func(void* param).
+ static void RegisterCallback(AtExitCallbackType func, void* param);
+
+ // Calls the functions registered with RegisterCallback in LIFO order. It
+ // is possible to register new callbacks after calling this function.
+ static void ProcessCallbacksNow();
+
+ private:
+ struct CallbackAndParam {
+ CallbackAndParam(AtExitCallbackType func, void* param)
+ : func_(func), param_(param) { }
+ AtExitCallbackType func_;
+ void* param_;
+ };
+
+ Lock lock_;
+ std::stack<CallbackAndParam> stack_;
+ AtExitManager* next_manager_; // Stack of managers to allow shadowing.
+
+ DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
+
+} // namespace base
+
+#endif // BASE_AT_EXIT_H_
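A minimal usage sketch of the pattern described in the header comment above (not part of the imported diff; ReleaseGlobalState and the main() body are illustrative):

    #include "base/at_exit.h"

    static void ReleaseGlobalState(void* /* param */) {
      // Tear down process-wide state; runs when the manager is destroyed or
      // when ProcessCallbacksNow() is called explicitly.
    }

    int main(int argc, char** argv) {
      base::AtExitManager exit_manager;  // must outlive all registered callbacks

      base::AtExitManager::RegisterCallback(&ReleaseGlobalState, NULL);
      // ... run the program ...
      return 0;
      // exit_manager's destructor now runs ReleaseGlobalState (LIFO order).
    }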
85 ipc/chromium/src/base/at_exit_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Don't test the global AtExitManager, because asking it to process its
+// AtExit callbacks can ruin the global state that other tests may depend on.
+class ShadowingAtExitManager : public base::AtExitManager {
+ public:
+ ShadowingAtExitManager() : AtExitManager(true) {}
+};
+
+int g_test_counter_1 = 0;
+int g_test_counter_2 = 0;
+
+void IncrementTestCounter1(void* unused) {
+ ++g_test_counter_1;
+}
+
+void IncrementTestCounter2(void* unused) {
+ ++g_test_counter_2;
+}
+
+void ZeroTestCounters() {
+ g_test_counter_1 = 0;
+ g_test_counter_2 = 0;
+}
+
+void ExpectCounter1IsZero(void* unused) {
+ EXPECT_EQ(0, g_test_counter_1);
+}
+
+void ExpectParamIsNull(void* param) {
+ EXPECT_EQ(static_cast<void*>(NULL), param);
+}
+
+void ExpectParamIsCounter(void* param) {
+ EXPECT_EQ(&g_test_counter_1, param);
+}
+
+} // namespace
+
+TEST(AtExitTest, Basic) {
+ ShadowingAtExitManager shadowing_at_exit_manager;
+
+ ZeroTestCounters();
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+
+ EXPECT_EQ(0, g_test_counter_1);
+ EXPECT_EQ(0, g_test_counter_2);
+ base::AtExitManager::ProcessCallbacksNow();
+ EXPECT_EQ(2, g_test_counter_1);
+ EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST(AtExitTest, LIFOOrder) {
+ ShadowingAtExitManager shadowing_at_exit_manager;
+
+ ZeroTestCounters();
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+ base::AtExitManager::RegisterCallback(&ExpectCounter1IsZero, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
+
+ EXPECT_EQ(0, g_test_counter_1);
+ EXPECT_EQ(0, g_test_counter_2);
+ base::AtExitManager::ProcessCallbacksNow();
+ EXPECT_EQ(1, g_test_counter_1);
+ EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST(AtExitTest, Param) {
+ ShadowingAtExitManager shadowing_at_exit_manager;
+
+ base::AtExitManager::RegisterCallback(&ExpectParamIsNull, NULL);
+ base::AtExitManager::RegisterCallback(&ExpectParamIsCounter,
+ &g_test_counter_1);
+ base::AtExitManager::ProcessCallbacksNow();
+}
63 ipc/chromium/src/base/atomic_ref_count.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low level implementation of atomic semantics for reference
+// counting. Please use base/ref_counted.h directly instead.
+
+#ifndef BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ATOMIC_REF_COUNT_H_
+
+#include "base/atomicops.h"
+
+namespace base {
+
+typedef subtle::Atomic32 AtomicRefCount;
+
+// Increment a reference count by "increment", which must exceed 0.
+inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
+ AtomicRefCount increment) {
+ subtle::NoBarrier_AtomicIncrement(ptr, increment);
+}
+
+// Decrement a reference count by "decrement", which must exceed 0,
+// and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDecN(volatile AtomicRefCount *ptr,
+ AtomicRefCount decrement) {
+ return subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0;
+}
+
+// Increment a reference count by 1.
+inline void AtomicRefCountInc(volatile AtomicRefCount *ptr) {
+ base::AtomicRefCountIncN(ptr, 1);
+}
+
+// Decrement a reference count by 1 and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDec(volatile AtomicRefCount *ptr) {
+ return base::AtomicRefCountDecN(ptr, 1);
+}
+
+// Return whether the reference count is one. If the reference count is used
+// in the conventional way, a reference count of 1 implies that the current
+// thread owns the reference and no other thread shares it. This call performs
+// the test for a reference count of one, and performs the memory barrier
+// needed for the owning thread to act on the object, knowing that it has
+// exclusive access to the object.
+inline bool AtomicRefCountIsOne(volatile AtomicRefCount *ptr) {
+ return subtle::Acquire_Load(ptr) == 1;
+}
+
+// Return whether the reference count is zero. With conventional object
+// reference counting, the object is destroyed when the count reaches zero,
+// so a live object's reference count should never be zero. Hence this is
+// generally used for a debug check.
+inline bool AtomicRefCountIsZero(volatile AtomicRefCount *ptr) {
+ return subtle::Acquire_Load(ptr) == 0;
+}
+
+} // namespace base
+
+#endif // BASE_ATOMIC_REF_COUNT_H_
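An illustrative sketch of the conventional ref-counting pattern these primitives support (the RefCountedThing class is hypothetical; base/ref_counted.h is the intended entry point):

    class RefCountedThing {
     public:
      RefCountedThing() : ref_count_(0) {}

      void AddRef() { base::AtomicRefCountInc(&ref_count_); }

      void Release() {
        if (!base::AtomicRefCountDec(&ref_count_)) {
          // The count hit zero; the barrier in AtomicRefCountDec makes writes
          // made by other threads visible before the object is destroyed.
          delete this;
        }
      }

     private:
      ~RefCountedThing() {}  // only deleted via Release()
      base::AtomicRefCount ref_count_;
    };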
30 ipc/chromium/src/base/atomic_sequence_num.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+
+namespace base {
+
+class AtomicSequenceNumber {
+ public:
+ AtomicSequenceNumber() : seq_(0) { }
+ explicit AtomicSequenceNumber(base::LinkerInitialized x) { /* seq_ is 0 */ }
+
+ int GetNext() {
+ return static_cast<int>(
+ base::subtle::NoBarrier_AtomicIncrement(&seq_, 1) - 1);
+ }
+
+ private:
+ base::subtle::Atomic32 seq_;
+ DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+} // namespace base
+
+#endif // BASE_ATOMIC_SEQUENCE_NUM_H_
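A short usage sketch (the g_request_ids global and NewRequestId() are hypothetical): GetNext() hands out 0, 1, 2, ... even when called from multiple threads, because the underlying increment is atomic.

    static base::AtomicSequenceNumber g_request_ids;

    int NewRequestId() {
      return g_request_ids.GetNext();  // first call returns 0, then 1, 2, ...
    }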
139 ipc/chromium/src/base/atomicops.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other archtectures will cause your code to break. If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+// NoBarrier_Store()
+// NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace base {
+namespace subtle {
+
+// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
+#ifndef OS_WIN
+#define __w64
+#endif
+typedef __w64 int32 Atomic32;
+#ifdef CPU_ARCH_64_BITS
+typedef int64 Atomic64;
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef CPU_ARCH_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif // CPU_ARCH_64_BITS
+
+} // namespace base::subtle
+} // namespace base
+
+// Include our platform specific implementation.
+#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_msvc.h"
+#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_macosx.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
+#include "base/atomicops_internals_arm_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+#endif // BASE_ATOMICOPS_H_
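As a sketch of the Acquire/Release semantics described above (a hypothetical producer/consumer pair; g_payload and g_ready are illustrative), a writer can publish data with Release_Store and a reader can consume it with Acquire_Load:

    #include "base/atomicops.h"

    static int g_payload = 0;                    // ordinary, non-atomic data
    static base::subtle::Atomic32 g_ready = 0;   // publication flag

    void Producer() {
      g_payload = 42;                            // plain write
      base::subtle::Release_Store(&g_ready, 1);  // payload visible before flag
    }

    void Consumer() {
      if (base::subtle::Acquire_Load(&g_ready)) {  // flag read before payload
        int value = g_payload;                     // guaranteed to observe 42
        (void)value;
      }
    }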
124 ipc/chromium/src/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+ (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+ (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value = *ptr;
+ do {
+ if (!pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ // Atomic exchange the old value with an incremented one.
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr)) == 0) {
+ // The exchange took place as expected.
+ return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
104 ipc/chromium/src/base/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,104 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file. If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction. In PIC compilations, %ebx contains the address
+// of the global offset table. To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+ asm ("mov %%ebx, %%edi\n" \
+ "cpuid\n" \
+ "xchg %%edi, %%ebx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined (__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+ asm ("mov %%rbx, %%rdi\n" \
+ "cpuid\n" \
+ "xchg %%rdi, %%rbx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid) // initialize the struct only on x86
+
+// Set the flags so that code will run correctly and conservatively, so even
+// if we haven't been initialized yet, we're probably single threaded, and our
+// default values should hopefully be pretty safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+ false, // bug can't exist before process spawns multiple threads
+ false, // no SSE2
+};
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+static void AtomicOps_Internalx86CPUFeaturesInit() {
+ uint32 eax;
+ uint32 ebx;
+ uint32 ecx;
+ uint32 edx;
+
+ // Get vendor string (issue CPUID with eax = 0)
+ cpuid(eax, ebx, ecx, edx, 0);
+ char vendor[13];
+ memcpy(vendor, &ebx, 4);
+ memcpy(vendor + 4, &edx, 4);
+ memcpy(vendor + 8, &ecx, 4);
+ vendor[12] = 0;
+
+ // get feature flags in ecx/edx, and family/model in eax
+ cpuid(eax, ebx, ecx, edx, 1);
+
+ int family = (eax >> 8) & 0xf; // family and model fields
+ int model = (eax >> 4) & 0xf;
+ if (family == 0xf) { // use extended family and model fields
+ family += (eax >> 20) & 0xff;
+ model += ((eax >> 16) & 0xf) << 4;
+ }
+
+ // Opteron Rev E has a bug in which on very rare occasions a locked
+ // instruction doesn't act as a read-acquire barrier if followed by a
+ // non-locked read-modify-write instruction. Rev F has this bug in
+ // pre-release versions, but not in versions released to customers,
+ // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+ if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
+ family == 15 &&
+ 32 <= model && model <= 63) {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+ } else {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+ }
+
+ // edx bit 26 is SSE2, which tells us whether we can use mfence.
+ AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+namespace {
+
+class AtomicOpsx86Initializer {
+ public:
+ AtomicOpsx86Initializer() {
+ AtomicOps_Internalx86CPUFeaturesInit();
+ }
+};
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+} // namespace
+
+#endif // if x86
+
+#endif // ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
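A worked example of the family/model decoding above, using a hypothetical CPUID leaf-1 eax value of 0x00020F32 (chosen to land in the Opteron Rev E range; not a verified part signature):

    // eax = 0x00020F32
    // base family = (eax >> 8) & 0xf            = 0xf (15)
    // base model  = (eax >> 4) & 0xf            = 0x3 (3)
    // family == 0xf, so the extended fields apply:
    //   family += (eax >> 20) & 0xff            -> 15 + 0  = 15
    //   model  += ((eax >> 16) & 0xf) << 4      -> 3  + 32 = 35
    // family 15 with model 35 falls in [32, 63], so has_amd_lock_mb_bug is set
    // (assuming the vendor string is "AuthenticAMD").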
248 ipc/chromium/src/base/atomicops_internals_x86_gcc.h
@@ -0,0 +1,248 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+ __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else { // mfence is faster but not present on PIII
+ Atomic32 x = 0;
+ NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
+ }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ *ptr = value;
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier on PIII
+ }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+ *ptr = value; // An x86 store acts as a release barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+
+ *ptr = value; // An x86 store acts as a release barrier
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Acquire_Load(), below.
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+ // System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+ //
+ // x86 stores/loads fail to act as barriers for a few instructions (clflush
+ // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+ // not generated by the compiler, and are rare. Users of these instructions
+ // need to know about cache behaviour in any case since all of these involve
+ // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Release_Store(), above.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+#endif // defined(__x86_64__)
+
+} // namespace base::subtle
+} // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
279 ipc/chromium/src/base/atomicops_internals_x86_macosx.h
@@ -0,0 +1,279 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+ OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64(old_value, new_value,
+ const_cast<Atomic64*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+ const_cast<Atomic64*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
+ const_cast<Atomic64*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ // The lib kern interface does not distinguish between
+ // Acquire and Release memory barriers; they are equivalent.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(__LP64__)
+
+// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
+// different types on the Mac, even when they are the same size. We need to
+// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
+// interface.
+#ifdef __LP64__
+#define AtomicWordCastType Atomic64
+#else
+#define AtomicWordCastType Atomic32
+#endif
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Release_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Acquire_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Release_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Acquire_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Release_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+#undef AtomicWordCastType
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
167 ipc/chromium/src/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ LONG result = InterlockedCompareExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value),
+ static_cast<LONG>(old_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ LONG result = InterlockedExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return InterlockedExchangeAdd(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+ // We use MemoryBarrier from WinNT.h
+ ::MemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+ // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ PVOID result = InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ PVOID result = InterlockedExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return InterlockedExchangeAdd64(
+ reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+ // System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(_WIN64)
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
237 ipc/chromium/src/base/atomicops_unittest.cc
@@ -0,0 +1,237 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/atomicops.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+ // For now, we just test single threaded execution
+
+ // Use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
+ // outside the expected address bounds. In particular, this tests that some
+ // future change to the asm code doesn't cause the 32-bit
+ // NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
+ struct {
+ AtomicType prev_word;
+ AtomicType count;
+ AtomicType next_word;
+ } s;
+
+ AtomicType prev_word_value, next_word_value;
+ memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+ memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+ s.prev_word = prev_word_value;
+ s.count = 0;
+ s.next_word = next_word_value;
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
+ EXPECT_EQ(s.count, 6);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
+ EXPECT_EQ(s.count, -1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
+ EXPECT_EQ(s.count, -5);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+}
+
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+ AtomicType value = 0;
+ AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, prev);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (GG_ULONGLONG(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, prev);
+
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, prev);
+}
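
For reference, a sketch of the concrete values k_test_val takes for the two widths (assuming two's-complement 32-/64-bit types and a C++11 compiler for static_assert; illustrative, not part of the imported test):

// Editorial check of the test constant for each width.
static_assert((1ULL << 30) + 11 == 0x4000000BULL,
              "Atomic32 k_test_val has non-zero bits in both 16-bit halves");
static_assert((1ULL << 62) + 11 == 0x400000000000000BULL,
              "Atomic64 k_test_val has non-zero bits in both 32-bit halves");
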
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+ AtomicType value = 0;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, new_value);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (GG_ULONGLONG(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, new_value);
+
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+ // Test at rollover boundary between int_max and int_min
+ AtomicType test_val = (GG_ULONGLONG(1) <<
+ (NUM_BITS(AtomicType) - 1));
+ AtomicType value = -1 ^ test_val;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ EXPECT_EQ(test_val, value);
+ EXPECT_EQ(value, new_value);
+
+ base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ EXPECT_EQ(-1 ^ test_val, value);
+
+ // Test at 32-bit boundary for 64-bit atomic type.
+ test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
+ value = test_val - 1;
+ new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ EXPECT_EQ(test_val, value);
+ EXPECT_EQ(value, new_value);
+
+ base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ EXPECT_EQ(test_val - 1, value);
+}
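
For a 32-bit AtomicType, the boundary expressions above work out as follows (a sketch assuming two's-complement representation and a C++11 compiler; illustrative, not part of the imported test):

// Editorial check of the rollover-boundary values.
static_assert((1u << 31) == 0x80000000u,
              "test_val is the INT32_MIN bit pattern");
static_assert((0xFFFFFFFFu ^ (1u << 31)) == 0x7FFFFFFFu,
              "-1 ^ test_val is INT32_MAX, so incrementing by 1 exercises the rollover");
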
+
+// Return an AtomicType with the value 0xa5a5a5..
+template <class AtomicType>
+static AtomicType TestFillValue() {
+ AtomicType val = 0;
+ memset(&val, 0xa5, sizeof(AtomicType));
+ return val;
+}
+
+// This is a simple sanity check that values are correct; it does not test
+// atomicity.
+template <class AtomicType>
+static void TestStore() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ base::subtle::NoBarrier_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::NoBarrier_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Acquire_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Acquire_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Release_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Release_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+}
+
+// This is a simple sanity check that values are correct; it does not test
+// atomicity.
+template <class AtomicType>
+static void TestLoad() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
+}
+
+TEST(AtomicOpsTest, Inc) {
+ TestAtomicIncrement<base::subtle::Atomic32>();
+ TestAtomicIncrement<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, CompareAndSwap) {
+ TestCompareAndSwap<base::subtle::Atomic32>();
+ TestCompareAndSwap<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Exchange) {
+ TestAtomicExchange<base::subtle::Atomic32>();
+ TestAtomicExchange<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, IncrementBounds) {
+ TestAtomicIncrementBounds<base::subtle::Atomic32>();
+ TestAtomicIncrementBounds<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Store) {
+ TestStore<base::subtle::Atomic32>();
+ TestStore<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Load) {
+ TestLoad<base::subtle::Atomic32>();
+ TestLoad<base::subtle::AtomicWord>();
+}
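
The AtomicWord instantiations above exercise the 64-bit implementations only on 64-bit builds; a hypothetical, explicitly guarded Atomic64 case might look like the following (the guard macros and test name are assumptions, not part of the imported file):

#if defined(_WIN64) || defined(__LP64__)
// Atomic64 is only guaranteed to be available on 64-bit targets.
TEST(AtomicOpsTest, Inc64) {
  TestAtomicIncrement<base::subtle::Atomic64>();
}
#endif
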
724 ipc/chromium/src/base/base.gyp
@@ -0,0 +1,724 @@
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../build/common.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'base',
+ 'type': '<(library)',
+ 'dependencies': [
+ '../third_party/icu38/icu38.gyp:icui18n',
+ '../third_party/icu38/icu38.gyp:icuuc',
+ ],
+ 'msvs_guid': '1832A374-8A74-4F9E-B536-69A699B3E165',
+ 'sources': [
+ '../build/build_config.h',
+ 'crypto/cssm_init.cc',
+ 'crypto/cssm_init.h',
+ 'crypto/signature_verifier.h',
+ 'crypto/signature_verifier_mac.cc',
+ 'crypto/signature_verifier_nss.cc',
+ 'crypto/signature_verifier_win.cc',
+ 'third_party/dmg_fp/dmg_fp.h',
+ 'third_party/dmg_fp/dtoa.cc',
+ 'third_party/dmg_fp/g_fmt.cc',
+ 'third_party/nspr/prcpucfg.h',
+ 'third_party/nspr/prcpucfg_win.h',
+ 'third_party/nspr/prtime.cc',
+ 'third_party/nspr/prtime.h',
+ 'third_party/nspr/prtypes.h',
+ 'third_party/nss/blapi.h',
+ 'third_party/nss/blapit.h',
+ 'third_party/nss/sha256.h',
+ 'third_party/nss/sha512.cc',
+ 'third_party/purify/pure.h',
+ 'third_party/purify/pure_api.c',
+ 'atomicops_internals_x86_gcc.cc',
+ 'at_exit.cc',
+ 'at_exit.h',
+ 'atomic_ref_count.h',
+ 'atomic_sequence_num.h',
+ 'atomicops.h',
+ 'atomicops_internals_x86_msvc.h',
+ 'base_drag_source.cc',
+ 'base_drag_source.h',
+ 'base_drop_target.cc',
+ 'base_drop_target.h',
+ 'base_paths.cc',
+ 'base_paths.h',
+ 'base_paths_linux.h',
+ 'base_paths_linux.cc',
+ 'base_paths_mac.h',
+ 'base_paths_mac.mm',
+ 'base_paths_win.cc',
+ 'base_paths_win.h',
+ 'base_switches.cc',
+ 'base_switches.h',
+ 'basictypes.h',
+ 'bzip2_error_handler.cc',
+ 'clipboard.cc',
+ 'clipboard.h',
+ 'clipboard_linux.cc',
+ 'clipboard_mac.mm',
+ 'clipboard_util.cc',
+ 'clipboard_util.h',
+ 'clipboard_win.cc',
+ 'command_line.cc',
+ 'command_line.h',
+ 'compiler_specific.h',
+ 'condition_variable.h',
+ 'condition_variable_posix.cc',
+ 'condition_variable_win.cc',
+ 'cpu.cc',
+ 'cpu.h',
+ 'data_pack.cc',
+ 'debug_on_start.cc',
+ 'debug_on_start.h',
+ 'debug_util.cc',
+ 'debug_util.h',
+ 'debug_util_mac.cc',
+ 'debug_util_posix.cc',
+ 'debug_util_win.cc',
+ 'directory_watcher.h',
+ 'directory_watcher_inotify.cc',
+ 'directory_watcher_mac.cc',
+ 'directory_watcher_win.cc',
+ 'event_recorder.cc',
+ 'event_recorder.h',
+ 'event_recorder_stubs.cc',
+ 'field_trial.cc',
+ 'field_trial.h',
+ 'file_descriptor_shuffle.cc',
+ 'file_descriptor_shuffle.h',
+ 'file_path.cc',
+ 'file_path.h',
+ 'file_util.cc',
+ 'file_util.h',
+ 'file_util_icu.cc',
+ 'file_util_linux.cc',
+ 'file_util_mac.mm',
+ 'file_util_posix.cc',
+ 'file_util_win.cc',
+ 'file_version_info.cc',
+ 'file_version_info.h',
+ 'file_version_info_linux.cc',
+ 'file_version_info_mac.mm',
+ 'fix_wp64.h',
+ 'float_util.h',
+ 'foundation_utils_mac.h',
+ 'hash_tables.h',
+ 'histogram.cc',
+ 'histogram.h',
+ 'hmac.h',
+ 'hmac_mac.cc',
+ 'hmac_nss.cc',
+ 'hmac_win.cc',
+ 'iat_patch.cc',
+ 'iat_patch.h',
+ 'icu_util.cc',
+ 'icu_util.h',
+ 'id_map.h',
+ 'idle_timer.cc',
+ 'idle_timer.h',
+ 'idle_timer_none.cc',
+ 'image_util.cc',
+ 'image_util.h',
+ 'json_reader.cc',
+ 'json_reader.h',
+ 'json_writer.cc',
+ 'json_writer.h',
+ 'keyboard_codes.h',
+ 'keyboard_codes_win.h',
+ 'lazy_instance.cc',
+ 'lazy_instance.h',
+ 'linked_ptr.h',
+ 'linux_util.cc',
+ 'linux_util.h',
+ 'lock.cc',
+ 'lock.h',
+ 'lock_impl.h',
+ 'lock_impl_posix.cc',
+ 'lock_impl_win.cc',
+ 'logging.cc',
+ 'logging.h',
+ 'mac_util.h',
+ 'mac_util.mm',
+ 'md5.cc',
+ 'md5.h',
+ 'memory_debug.cc',
+ 'memory_debug.h',
+ 'message_loop.cc',
+ 'message_loop.h',
+ 'message_pump.h',
+ 'message_pump_default.cc',
+ 'message_pump_default.h',
+ 'message_pump_glib.cc',
+ 'message_pump_glib.h',
+ 'message_pump_libevent.cc',
+ 'message_pump_libevent.h',
+ 'message_pump_mac.h',
+ 'message_pump_mac.mm',
+ 'message_pump_win.cc',
+ 'message_pump_win.h',
+ 'native_library.h',
+ 'native_library_linux.cc',
+ 'native_library_mac.mm',
+ 'native_library_win.cc',
+ 'non_thread_safe.cc',
+ 'non_thread_safe.h',
+ 'nss_init.cc',
+ 'nss_init.h',
+ 'object_watcher.cc',
+ 'object_watcher.h',
+ 'observer_list.h',
+ 'observer_list_threadsafe.h',
+ 'path_service.cc',
+ 'path_service.h',
+ 'pe_image.cc',
+ 'pe_image.h',
+ 'pickle.cc',
+ 'pickle.h',
+ 'platform_file.h',
+ 'platform_file_win.cc',
+ 'platform_file_posix.cc',
+ 'platform_thread.h',
+ 'platform_thread_mac.mm',
+ 'platform_thread_posix.cc',
+ 'platform_thread_win.cc',
+ 'port.h',
+ 'profiler.cc',
+ 'profiler.h',
+ 'process.h',
+ 'process_posix.cc',
+ 'process_util.h',
+ 'process_util_linux.cc',
+ 'process_util_mac.mm',
+ 'process_util_posix.cc',
+ 'process_util_win.cc',
+ 'process_win.cc',
+ 'rand_util.cc',
+ 'rand_util.h',
+ 'rand_util_posix.cc',
+ 'rand_util_win.cc',
+ 'ref_counted.cc',
+ 'ref_counted.h',
+ 'registry.cc',
+ 'registry.h',
+ 'resource_util.cc',
+ 'resource_util.h',
+ 'revocable_store.cc',
+ 'revocable_store.h',
+ 'scoped_bstr_win.cc',
+ 'scoped_bstr_win.h',
+ 'scoped_cftyperef.h',
+ 'scoped_clipboard_writer.cc',
+ 'scoped_clipboard_writer.h',
+ 'scoped_comptr_win.h',
+ 'scoped_handle.h',
+ 'scoped_handle_win.h',
+ 'scoped_nsautorelease_pool.h',
+ 'scoped_nsautorelease_pool.mm',
+ 'scoped_nsobject.h',
+ 'scoped_ptr.h',
+ 'scoped_temp_dir.cc',
+ 'scoped_temp_dir.h',
+ 'scoped_variant_win.cc',
+ 'scoped_variant_win.h',
+ 'scoped_vector.h',
+ 'sha2.cc',
+ 'sha2.h',
+ 'shared_memory.h',
+ 'shared_memory_posix.cc',
+ 'shared_memory_win.cc',
+ 'simple_thread.cc',
+ 'simple_thread.h',
+ 'singleton.h',
+ 'spin_wait.h',
+ 'stack_container.h',
+ 'stats_counters.h',
+ 'stats_table.cc',
+ 'stats_table.h',
+ 'stl_util-inl.h',
+ 'string16.cc',
+ 'string16.h',
+ 'string_escape.cc',
+ 'string_escape.h',
+ 'string_piece.cc',
+ 'string_piece.h',
+ 'string_tokenizer.h',
+ 'string_util.cc',
+ 'string_util.h',
+ 'string_util_icu.cc',
+ 'string_util_win.h',
+ 'sys_info.h',
+ 'sys_info_mac.cc',
+ 'sys_info_posix.cc',
+ 'sys_info_win.cc',
+ 'sys_string_conversions.h',
+ 'sys_string_conversions_linux.cc',
+ 'sys_string_conversions_mac.mm',
+ 'sys_string_conversions_win.cc',
+ 'system_monitor.cc',
+ 'system_monitor.h',
+ 'system_monitor_posix.cc',
+ 'system_monitor_win.cc',
+ 'task.h',
+ 'test_file_util.h',
+ 'test_file_util_linux.cc',
+ 'test_file_util_mac.cc',
+ 'test_file_util_posix.cc',
+ 'test_file_util_win.cc',
+ 'thread.cc',
+ 'thread.h',
+ 'thread_collision_warner.cc',
+ 'thread_collision_warner.h',
+ 'thread_local.h',
+ 'thread_local_posix.cc',
+ 'thread_local_storage.h',
+ 'thread_local_storage_posix.cc',
+ 'thread_local_storage_win.cc',
+ 'thread_local_win.cc',
+ 'time.cc',
+ 'time.h',
+ 'time_format.cc',
+ 'time_format.h',
+ 'time_mac.cc',
+ 'time_posix.cc',
+ 'time_win.cc',
+ 'timer.cc',
+ 'timer.h',
+ 'trace_event.cc',
+ 'trace_event.h',
+ 'tracked.cc',
+ 'tracked.h',
+ 'tracked_objects.cc',
+ 'tracked_objects.h',
+ 'tuple.h',
+ 'values.cc',
+ 'values.h',
+ 'version.cc',
+ 'version.h',
+ 'waitable_event.h',
+ 'waitable_event_posix.cc',
+ 'waitable_event_watcher.h',
+ 'waitable_event_watcher_posix.cc',
+ 'waitable_event_watcher_win.cc',
+ 'waitable_event_win.cc',
+ 'watchdog.cc',
+ 'watchdog.h',
+ 'win_util.cc',
+ 'win_util.h',
+ 'windows_message_list.h',
+ 'wmi_util.cc',
+ 'wmi_util.h',
+ 'word_iterator.cc',
+ 'word_iterator.h',
+ 'worker_pool.h',
+ 'worker_pool_linux.cc',
+ 'worker_pool_linux.h',
+ 'worker_pool_mac.mm',
+ 'worker_pool_win.cc',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+      # Disabling these warnings is needed for the files in third_party\dmg_fp.
+ 'msvs_disabled_warnings': [
+ 4244, 4554, 4018, 4102,
+ ],
+ 'conditions': [
+ [ 'OS == "linux"', {
+ 'actions': [
+ {
+ 'action_name': 'linux_version',
+ 'variables': {
+ 'template_input_path': 'file_version_info_linux.h.version',
+ 'template_output_path':
+ '<(SHARED_INTERMEDIATE_DIR)/base/file_version_info_linux.h',
+ },
+ 'inputs': [
+ '<(template_input_path)',
+ '../chrome/VERSION',
+ '../chrome/tools/build/linux/version.sh',
+ ],
+ 'conditions': [
+ [ 'branding == "Chrome"', {
+ 'inputs': ['../chrome/app/theme/google_chrome/BRANDING']
+ }, { # else branding!="Chrome"
+ 'inputs': ['../chrome/app/theme/chromium/BRANDING']
+ }],
+ ],
+ 'outputs': [
+          # Use a non-existent output so this action always runs and
+ # generates version information, e.g. to capture revision
+ # changes, which aren't captured by file dependencies.
+ '<(SHARED_INTERMEDIATE_DIR)/base/file_version_info_linux.bogus',
+ ],
+ 'action': [
+ '../chrome/tools/build/linux/version.sh',
+ '<(template_input_path)', '<(template_output_path)',
+ ],
+ },
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'sources/': [ ['exclude', '_(mac|win)\\.cc$'],
+ ['exclude', '\\.mm?$' ] ],
+ 'sources!': [
+ # Linux has an implementation of idle_timer that depends
+ # on XScreenSaver, but it's unclear if we want it yet,
+ # so use idle_timer_none.cc instead.
+ 'idle_timer.cc',
+ ],
+ 'dependencies': [
+ '../build/linux/system.gyp:gtk',
+ '../build/linux/system.gyp:nss',
+ ],
+ 'cflags': [
+ '-Wno-write-strings',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ # We need rt for clock_gettime().
+ '-lrt',
+ ],
+ },
+ },
+ { # else: OS != "linux"
+ 'sources!': [
+ 'crypto/signature_verifier_nss.cc',
+ 'atomicops_internals_x86_gcc.cc',
+ 'directory_watcher_inotify.cc',
+ 'hmac_nss.cc',
+ 'idle_timer_none.cc',
+ 'linux_util.cc',
+ 'message_pump_glib.cc',
+ 'nss_init.cc',
+ 'nss_init.h',
+ 'time_posix.cc',
+ ],
+ }
+ ],
+ [ 'GENERATOR == "quentin"', {
+      # Quentin builds don't have a recent enough glibc to include the
+      # inotify headers.
+ 'sources!': [
+ 'directory_watcher_inotify.cc',
+ ],
+ 'sources': [
+ 'directory_watcher_stub.cc',
+ ],
+ },
+ ],
+ [ 'OS == "mac"', {
+ 'sources/': [ ['exclude', '_(linux|win)\\.cc$'] ],
+ 'sources!': [
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+ ],
+ },
+ },
+ { # else: OS != "mac"
+ 'sources!': [
+ 'crypto/cssm_init.cc',
+ 'crypto/cssm_init.h',
+ ],
+ }
+ ],
+ [ 'OS == "win"', {
+ 'sources/': [ ['exclude', '_(linux|mac|posix)\\.cc$'],
+ ['exclude', '\\.mm?$' ] ],
+ 'sources!': [
+ 'data_pack.cc',
+ 'event_recorder_stubs.cc',