| @ -1,67 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/at_exit.h" | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| // Keep a stack of registered AtExitManagers. We always operate on the most | |||||
| // recent, and we should never have more than one outside of testing, when we | |||||
| // use the shadow version of the constructor. We don't protect this for | |||||
| // thread-safe access, since it will only be modified in testing. | |||||
| static AtExitManager* g_top_manager = NULL; | |||||
| AtExitManager::AtExitManager() : next_manager_(NULL) { | |||||
| DCHECK(!g_top_manager); | |||||
| g_top_manager = this; | |||||
| } | |||||
AtExitManager::~AtExitManager() {
  // Destroying a manager when none is installed means construction never ran
  // (or the stack is corrupt); bail rather than touch global state we don't own.
  if (!g_top_manager) {
    NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
    return;
  }
  // Managers must be destroyed in LIFO order: only the top of the global
  // stack may be torn down.
  DCHECK(g_top_manager == this);
  // Run every callback registered with this manager before popping it.
  ProcessCallbacksNow();
  // Pop this manager; next_manager_ is NULL except when shadowing in tests.
  g_top_manager = next_manager_;
}
| // static | |||||
| void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) { | |||||
| if (!g_top_manager) { | |||||
| NOTREACHED() << "Tried to RegisterCallback without an AtExitManager"; | |||||
| return; | |||||
| } | |||||
| DCHECK(func); | |||||
| AutoLock lock(g_top_manager->lock_); | |||||
| g_top_manager->stack_.push(CallbackAndParam(func, param)); | |||||
| } | |||||
| // static | |||||
| void AtExitManager::ProcessCallbacksNow() { | |||||
| if (!g_top_manager) { | |||||
| NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager"; | |||||
| return; | |||||
| } | |||||
| AutoLock lock(g_top_manager->lock_); | |||||
| while (!g_top_manager->stack_.empty()) { | |||||
| CallbackAndParam callback_and_param = g_top_manager->stack_.top(); | |||||
| g_top_manager->stack_.pop(); | |||||
| callback_and_param.func_(callback_and_param.param_); | |||||
| } | |||||
| } | |||||
// Shadow constructor (testing only): pushes this manager on top of any
// existing one, remembering the previous top in next_manager_ so the
// destructor can restore it.
AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
  // A pre-existing manager is only legal when explicitly shadowing.
  DCHECK(shadow || !g_top_manager);
  g_top_manager = this;
}
| } // namespace base | |||||
| @ -1,79 +0,0 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_AT_EXIT_H_ | |||||
| #define BASE_AT_EXIT_H_ | |||||
| #pragma once | |||||
| #include <stack> | |||||
| #include "base/basictypes.h" | |||||
| #include "base/synchronization/lock.h" | |||||
| namespace base { | |||||
| // This class provides a facility similar to the CRT atexit(), except that | |||||
| // we control when the callbacks are executed. Under Windows for a DLL they | |||||
| // happen at a really bad time and under the loader lock. This facility is | |||||
| // mostly used by base::Singleton. | |||||
| // | |||||
| // The usage is simple. Early in the main() or WinMain() scope create an | |||||
| // AtExitManager object on the stack: | |||||
| // int main(...) { | |||||
| // base::AtExitManager exit_manager; | |||||
| // | |||||
| // } | |||||
| // When the exit_manager object goes out of scope, all the registered | |||||
| // callbacks and singleton destructors will be called. | |||||
class AtExitManager {
 public:
  // Signature of callbacks registered via RegisterCallback().
  typedef void (*AtExitCallbackType)(void*);
  AtExitManager();
  // The dtor calls all the registered callbacks. Do not try to register more
  // callbacks after this point.
  ~AtExitManager();
  // Registers the specified function to be called at exit. The prototype of
  // the callback function is void func(void* param); |param| is passed back
  // to the callback verbatim.
  static void RegisterCallback(AtExitCallbackType func, void* param);
  // Calls the functions registered with RegisterCallback in LIFO order. It
  // is possible to register new callbacks after calling this function.
  static void ProcessCallbacksNow();
 protected:
  // This constructor will allow this instance of AtExitManager to be created
  // even if one already exists. This should only be used for testing!
  // AtExitManagers are kept on a global stack, and it will be removed during
  // destruction. This allows you to shadow another AtExitManager.
  explicit AtExitManager(bool shadow);
 private:
  // One registered callback plus the opaque context pointer handed back to it.
  struct CallbackAndParam {
    CallbackAndParam(AtExitCallbackType func, void* param)
        : func_(func), param_(param) { }
    AtExitCallbackType func_;
    void* param_;
  };
  base::Lock lock_;  // Guards |stack_|.
  std::stack<CallbackAndParam> stack_;  // Pending callbacks, run in LIFO order.
  AtExitManager* next_manager_;  // Stack of managers to allow shadowing.
  DISALLOW_COPY_AND_ASSIGN(AtExitManager);
};
#if defined(UNIT_TEST)
// Test-only convenience wrapper that exposes the protected shadow
// constructor, letting a unit test temporarily install its own manager on
// top of the production one.
class ShadowingAtExitManager : public AtExitManager {
 public:
  ShadowingAtExitManager() : AtExitManager(true) {}
};
#endif  // defined(UNIT_TEST)
| } // namespace base | |||||
| #endif // BASE_AT_EXIT_H_ | |||||
| @ -1,148 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // For atomic operations on reference counts, see atomic_refcount.h. | |||||
| // For atomic operations on sequence numbers, see atomic_sequence_num.h. | |||||
| // The routines exported by this module are subtle. If you use them, even if | |||||
| // you get the code right, it will depend on careful reasoning about atomicity | |||||
| // and memory ordering; it will be less readable, and harder to maintain. If | |||||
| // you plan to use these routines, you should have a good reason, such as solid | |||||
| // evidence that performance would otherwise suffer, or there being no | |||||
| // alternative. You should assume only properties explicitly guaranteed by the | |||||
| // specifications in this file. You are almost certainly _not_ writing code | |||||
| // just for the x86; if you assume x86 semantics, x86 hardware bugs and | |||||
| // implementations on other archtectures will cause your code to break. If you | |||||
| // do not know what you are doing, avoid these routines, and use a Mutex. | |||||
| // | |||||
| // It is incorrect to make direct assignments to/from an atomic variable. | |||||
| // You should use one of the Load or Store routines. The NoBarrier | |||||
| // versions are provided when no barriers are needed: | |||||
| // NoBarrier_Store() | |||||
| // NoBarrier_Load() | |||||
| // Although there are currently no compiler enforcement, you are encouraged | |||||
| // to use these. | |||||
| // | |||||
| #ifndef BASE_ATOMICOPS_H_ | |||||
| #define BASE_ATOMICOPS_H_ | |||||
| #pragma once | |||||
| #include "base/basictypes.h" | |||||
| #include "base/port.h" | |||||
| namespace base { | |||||
| namespace subtle { | |||||
// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
// __w64 is an MSVC-only annotation; define it away everywhere else.
#ifndef OS_WIN
#define __w64
#endif
typedef __w64 int32 Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(OS_NACL)
// NaCl's intptr_t is not actually 64-bits on 64-bit!
// http://code.google.com/p/nativeclient/issues/detail?id=1162
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;
// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
// Same as above, but with full (acquire+release) barrier semantics.
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions.  "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
// Same contracts as the 32-bit variants above.
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // ARCH_CPU_64_BITS
| } // namespace base::subtle | |||||
| } // namespace base | |||||
| // Include our platform specific implementation. | |||||
| #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) | |||||
| #include "base/atomicops_internals_x86_msvc.h" | |||||
| #elif defined(OS_MACOSX) | |||||
| #include "base/atomicops_internals_x86_macosx.h" | |||||
| #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY) | |||||
| #include "base/atomicops_internals_x86_gcc.h" | |||||
| #elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY) | |||||
| #include "base/atomicops_internals_arm_gcc.h" | |||||
| #else | |||||
| #error "Atomic operations are not supported on your platform" | |||||
| #endif | |||||
| #endif // BASE_ATOMICOPS_H_ | |||||
| @ -1,125 +0,0 @@ | |||||
| // Copyright (c) 2009 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This file is an internal atomic implementation, use base/atomicops.h instead. | |||||
| // | |||||
| // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | |||||
| #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |||||
| #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |||||
| #pragma once | |||||
| namespace base { | |||||
| namespace subtle { | |||||
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
// NOTE(review): per the comments below, the helper returns 0 on a successful
// swap and non-zero on failure — confirm against the kernel user-helper ABI.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
                                           Atomic32 new_value,
                                           volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
// Kernel-provided memory barrier helper at another fixed address.
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
// CAS built on the kernel cmpxchg helper.  The helper can fail spuriously
// (it returns non-zero) even when *ptr still equals old_value, so retry
// until either the swap succeeds or we observe a genuinely different value.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value = *ptr;
  do {
    // Zero return means the swap took place; report the expected old value.
    if (!pLinuxKernelCmpxchg(old_value, new_value,
                             const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);  // Value unchanged: spurious failure, retry.
  return prev_value;
}
| inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | |||||
| Atomic32 new_value) { | |||||
| Atomic32 old_value; | |||||
| do { | |||||
| old_value = *ptr; | |||||
| } while (pLinuxKernelCmpxchg(old_value, new_value, | |||||
| const_cast<Atomic32*>(ptr))); | |||||
| return old_value; | |||||
| } | |||||
// This port has no cheaper barrier-free increment; delegate to the barrier
// version.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
// Increment implemented as a CAS retry loop; returns the incremented value.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  for (;;) {
    // Atomic exchange the old value with an incremented one.
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    // The kernel helper returns 0 when the swap took place.
    if (pLinuxKernelCmpxchg(old_value, new_value,
                            const_cast<Atomic32*>(ptr)) == 0) {
      // The exchange took place as expected.
      return new_value;
    }
    // Otherwise, *ptr changed mid-loop and we need to retry.
  }
}
// Acquire/Release CAS both delegate to the NoBarrier version on this port;
// presumably the kernel cmpxchg helper already provides sufficient ordering
// — TODO(review): confirm against the ARM kernel user-helper documentation.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
// Plain store; no ordering guarantees.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
// Full barrier via the kernel-provided helper at 0xffff0fa0.
inline void MemoryBarrier() {
  pLinuxKernelMemoryBarrier();
}
// Store, then barrier: later accesses cannot move before the store.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
// Barrier, then store: earlier accesses complete before the store is visible.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
// Plain load; no ordering guarantees.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
// Load, then barrier: later accesses cannot move before the load.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
// Barrier, then load.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
| } // namespace base::subtle | |||||
| } // namespace base | |||||
| #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |||||
| @ -1,104 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This module gets enough CPU information to optimize the | |||||
| // atomicops module on x86. | |||||
| #include <string.h> | |||||
| #include "base/atomicops.h" | |||||
| #include "base/basictypes.h" | |||||
| // This file only makes sense with atomicops_internals_x86_gcc.h -- it | |||||
| // depends on structs that are defined in that file. If atomicops.h | |||||
| // doesn't sub-include that file, then we aren't needed, and shouldn't | |||||
| // try to do anything. | |||||
| #ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | |||||
| // Inline cpuid instruction. In PIC compilations, %ebx contains the address | |||||
| // of the global offset table. To avoid breaking such executables, this code | |||||
| // must preserve that register's value across cpuid instructions. | |||||
#if defined(__i386__)
// 32-bit: save %ebx in %edi around cpuid (see PIC/GOT note above), and read
// the saved copy back out through the "=D" output.
#define cpuid(a, b, c, d, inp) \
  asm ("mov %%ebx, %%edi\n"    \
       "cpuid\n"               \
       "xchg %%edi, %%ebx\n"   \
       : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined (__x86_64__)
// 64-bit variant: same trick with %rbx/%rdi.
#define cpuid(a, b, c, d, inp) \
  asm ("mov %%rbx, %%rdi\n"    \
       "cpuid\n"               \
       "xchg %%rdi, %%rbx\n"   \
       : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid)        // initialize the struct only on x86
// Set the flags so that code will run correctly and conservatively before
// initialization: the AMD lock/mb bug cannot matter while the process is
// still single threaded, and assuming "no SSE2" only costs performance.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
  false,          // bug can't exist before process spawns multiple threads
  false,          // no SSE2
};
// Initialize the AtomicOps_Internalx86CPUFeatures struct by probing CPUID:
// detects the AMD Opteron Rev E lock/mb erratum and SSE2 (mfence) support.
static void AtomicOps_Internalx86CPUFeaturesInit() {
  uint32 eax;
  uint32 ebx;
  uint32 ecx;
  uint32 edx;
  // Get vendor string (issue CPUID with eax = 0).  The 12-byte vendor id is
  // returned split across ebx/edx/ecx, in that order.
  cpuid(eax, ebx, ecx, edx, 0);
  char vendor[13];
  memcpy(vendor, &ebx, 4);
  memcpy(vendor + 4, &edx, 4);
  memcpy(vendor + 8, &ecx, 4);
  vendor[12] = 0;  // NUL-terminate for strcmp below.
  // Get feature flags in ecx/edx, and family/model in eax.
  cpuid(eax, ebx, ecx, edx, 1);
  int family = (eax >> 8) & 0xf;  // family and model fields
  int model = (eax >> 4) & 0xf;
  if (family == 0xf) {  // use extended family and model fields
    family += (eax >> 20) & 0xff;
    model += ((eax >> 16) & 0xf) << 4;
  }
  // Opteron Rev E has a bug in which on very rare occasions a locked
  // instruction doesn't act as a read-acquire barrier if followed by a
  // non-locked read-modify-write instruction.  Rev F has this bug in
  // pre-release versions, but not in versions released to customers,
  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
      family == 15 &&
      32 <= model && model <= 63) {
    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
  } else {
    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
  }
  // edx bit 26 is SSE2, which tells us whether we can use mfence.
  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
namespace {
// Runs the CPU-feature probe once, during static initialization, so the
// feature flags are correct by the time multi-threaded code can run.
class AtomicOpsx86Initializer {
 public:
  AtomicOpsx86Initializer() {
    AtomicOps_Internalx86CPUFeaturesInit();
  }
};
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
}  // namespace
| #endif // if x86 | |||||
| #endif // ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | |||||
| @ -1,266 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This file is an internal atomic implementation, use base/atomicops.h instead. | |||||
| #ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | |||||
| #define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | |||||
| #pragma once | |||||
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86.  Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2 (and therefore mfence).
};
// Defined and populated in atomicops_internals_x86_gcc.cc.
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
// Compiler-only barrier: prevents the compiler (not the CPU) from reordering
// memory accesses across it.
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
| namespace base { | |||||
| namespace subtle { | |||||
// 32-bit low-level operations on any platform.

// CAS via "lock; cmpxchgl".  cmpxchg implicitly compares against and loads
// into %eax, which is why old_value is tied to the "=a" output ("0").
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
// Fetch-and-add via "lock; xaddl"; returns the incremented value.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  // Work around the AMD Opteron Rev E erratum: a locked instruction may not
  // act as a read-acquire barrier there, so add an explicit lfence.
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  // Same AMD erratum workaround as Barrier_AtomicIncrement above.
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}
// On x86 a locked cmpxchg already provides release semantics.
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
#else
// 32-bit: mfence requires SSE2, so fall back to a locked exchange (which is
// a full barrier) on pre-SSE2 processors.
inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);
                          // acts as a barrier on PIII
  }
}
#endif
// Only a compiler barrier is needed: on x86 ordinary stores already have
// release semantics at the hardware level.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as a acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.  These mirror the 32-bit
// versions above, using the q-suffixed instructions.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  // AMD Opteron Rev E erratum workaround; see the 32-bit version above.
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.
  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chatper 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as a acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  // AMD Opteron Rev E erratum workaround; see the 32-bit version above.
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif  // defined(__x86_64__)
| } // namespace base::subtle | |||||
| } // namespace base | |||||
| #undef ATOMICOPS_COMPILER_BARRIER | |||||
| #endif // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | |||||
| @ -1,281 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This file is an internal atomic implementation, use base/atomicops.h instead. | |||||
| #ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | |||||
| #define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | |||||
| #pragma once | |||||
| #include <libkern/OSAtomic.h> | |||||
| namespace base { | |||||
| namespace subtle { | |||||
// CAS built on OSAtomicCompareAndSwap32, which can fail spuriously; retry
// until either the swap succeeds or *ptr is observed to differ from
// old_value (a genuine failure).
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;  // Swap succeeded; old_value was the previous value.
    }
    prev_value = *ptr;
  } while (prev_value == old_value);  // Unchanged value: spurious failure.
  return prev_value;
}
| inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, | |||||
| Atomic32 new_value) { | |||||
| Atomic32 old_value; | |||||
| do { | |||||
| old_value = *ptr; | |||||
| } while (!OSAtomicCompareAndSwap32(old_value, new_value, | |||||
| const_cast<Atomic32*>(ptr))); | |||||
| return old_value; | |||||
| } | |||||
| inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, | |||||
| Atomic32 increment) { | |||||
| return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); | |||||
| } | |||||
| inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, | |||||
| Atomic32 increment) { | |||||
| return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); | |||||
| } | |||||
| inline void MemoryBarrier() { | |||||
| OSMemoryBarrier(); | |||||
| } | |||||
| inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, | |||||
| Atomic32 old_value, | |||||
| Atomic32 new_value) { | |||||
| Atomic32 prev_value; | |||||
| do { | |||||
| if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, | |||||
| const_cast<Atomic32*>(ptr))) { | |||||
| return old_value; | |||||
| } | |||||
| prev_value = *ptr; | |||||
| } while (prev_value == old_value); | |||||
| return prev_value; | |||||
| } | |||||
| inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, | |||||
| Atomic32 old_value, | |||||
| Atomic32 new_value) { | |||||
| return Acquire_CompareAndSwap(ptr, old_value, new_value); | |||||
| } | |||||
| inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | |||||
| *ptr = value; | |||||
| } | |||||
| inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { | |||||
| *ptr = value; | |||||
| MemoryBarrier(); | |||||
| } | |||||
| inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { | |||||
| MemoryBarrier(); | |||||
| *ptr = value; | |||||
| } | |||||
| inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | |||||
| return *ptr; | |||||
| } | |||||
| inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { | |||||
| Atomic32 value = *ptr; | |||||
| MemoryBarrier(); | |||||
| return value; | |||||
| } | |||||
| inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { | |||||
| MemoryBarrier(); | |||||
| return *ptr; | |||||
| } | |||||
#ifdef __LP64__

// 64-bit implementation on 64-bit platform

// 64-bit CAS with no ordering guarantees; see the 32-bit version above for
// why the retry loop is needed.  OSAtomic's 64-bit API takes int64_t*, hence
// the reinterpret_cast (Atomic64 is layout-compatible here).
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

// Atomically stores |new_value| and returns the previous value (CAS loop;
// no plain 64-bit exchange in libkern).
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

// Atomic 64-bit add with no ordering guarantees; returns the new value.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

// Atomic 64-bit add via the barrier variant; returns the new value.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

// 64-bit CAS with (at least) acquire semantics via the *Barrier variant.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

// Plain store; no ordering guarantees.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

// Store, then full barrier (mirrors the 32-bit version above).
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

// Full barrier, then store.
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

// Plain load; no ordering guarantees.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

// Load, then full barrier.
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

// Full barrier, then load.
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}
#endif  // defined(__LP64__)
| // MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different | |||||
| // on the Mac, even when they are the same size. We need to explicitly cast | |||||
| // from AtomicWord to Atomic32 to implement the AtomicWord interface. | |||||
| // When in 64-bit mode, AtomicWord is the same as Atomic64, so we need not | |||||
| // add duplicate definitions. | |||||
#ifndef __LP64__
#define AtomicWordCastType Atomic32

// In 32-bit mode AtomicWord (a long) is a distinct type from Atomic32 even
// though they have the same size, so each AtomicWord operation forwards to
// the Atomic32 implementation via a cast.  Note that some wrappers call the
// target unqualified and others qualify with base::subtle:: — the
// qualification forces resolution to the Atomic32 overload instead of
// recursing into the wrapper itself; preserve it.

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType
#endif
| } // namespace base::subtle | |||||
| } // namespace base | |||||
| #endif // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | |||||
| @ -1,181 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This file is an internal atomic implementation, use base/atomicops.h instead. | |||||
| #ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | |||||
| #define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | |||||
| #pragma once | |||||
| #include <windows.h> | |||||
| namespace base { | |||||
| namespace subtle { | |||||
// 32-bit CAS via InterlockedCompareExchange; returns the value previously
// at |ptr|.  Windows Interlocked* APIs act as full memory barriers, which is
// why the Acquire_/Release_ CAS variants below can simply reuse this.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

// Atomically stores |new_value| and returns the previous value.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

// Atomic add; returns the incremented value.  InterlockedExchangeAdd
// returns the *old* value, so |increment| is added back for the result.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

// No cheaper non-barrier increment exists on this platform, so this just
// delegates to the barrier version.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}

// The Interlocked CAS above is already a full barrier, so acquire/release
// flavors need no extra fences.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

// Plain store; no ordering guarantees.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

// Plain load; no ordering guarantees.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

// Plain load; NOTE(review): relies on x86 load-acquire semantics and the
// volatile qualifier for compiler ordering — no explicit fence is issued.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

// Full barrier, then load.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

// The pointer-based Interlocked APIs are reused for Atomic64, which is only
// valid because pointers are 64 bits wide here.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

// 64-bit CAS; returns the value previously at |ptr|.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

// Atomically stores |new_value| and returns the previous value.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

// Atomic add; returns the incremented value (InterlockedExchangeAdd64
// returns the old value, so |increment| is added back).
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

// No cheaper non-barrier increment on this platform; delegates to the
// barrier version.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

// Plain store; no ordering guarantees.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chatper 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

// Plain load; no ordering guarantees.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

// Plain load; NOTE(review): as with the 32-bit version, no explicit fence —
// relies on x86-64 load ordering plus volatile for compiler ordering.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

// Full barrier, then load.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

// Interlocked CAS is a full barrier; no extra fences needed.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)
| } // namespace base::subtle | |||||
| } // namespace base | |||||
| #endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | |||||
| @ -1,121 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_COMPILER_SPECIFIC_H_ | |||||
| #define BASE_COMPILER_SPECIFIC_H_ | |||||
| #pragma once | |||||
| #include "build/build_config.h" | |||||
#if defined(COMPILER_MSVC)

// Macros for suppressing and disabling warnings on MSVC.
//
// Warning numbers are enumerated at:
//  http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
//
// The warning pragma:
//  http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
//
// Using __pragma instead of #pragma inside macros:
//  http://msdn.microsoft.com/en-us/library/d9x1s805.aspx

// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
// for the next line of the source file.
#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))

// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
// The warning remains disabled until popped by MSVC_POP_WARNING.
#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
                                     __pragma(warning(disable:n))

// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level.  The level
// remains in effect until popped by MSVC_POP_WARNING().  Use 0 to disable all
// warnings.
#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))

// Pop effects of innermost MSVC_PUSH_* macro.
#define MSVC_POP_WARNING() __pragma(warning(pop))

#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))

// Allows |this| to be passed as an argument in constructor initializer lists.
// This uses push/pop instead of the seemingly simpler suppress feature to avoid
// having the warning be disabled for more than just |code|.
//
// Example usage:
// Foo::Foo() : x(NULL), ALLOW_THIS_IN_INITIALIZER_LIST(y(this)), z(3) {}
//
// Compiler warning C4355: 'this': used in base member initializer list:
// http://msdn.microsoft.com/en-us/library/3c594ae3(VS.80).aspx
#define ALLOW_THIS_IN_INITIALIZER_LIST(code) MSVC_PUSH_DISABLE_WARNING(4355) \
                                             code \
                                             MSVC_POP_WARNING()

#else  // Not MSVC

// No-op stand-ins so call sites compile unchanged on other compilers.
#define MSVC_SUPPRESS_WARNING(n)
#define MSVC_PUSH_DISABLE_WARNING(n)
#define MSVC_PUSH_WARNING_LEVEL(n)
#define MSVC_POP_WARNING()
#define MSVC_DISABLE_OPTIMIZE()
#define MSVC_ENABLE_OPTIMIZE()
#define ALLOW_THIS_IN_INITIALIZER_LIST(code) code

#endif  // COMPILER_MSVC

// Annotate a variable indicating it's ok if the variable is not used.
// (Typically used to silence a compiler warning when the assignment
// is important for some other reason.)
// Use like:
//   int x ALLOW_UNUSED = ...;
#if defined(COMPILER_GCC)
#define ALLOW_UNUSED __attribute__((unused))
#else
#define ALLOW_UNUSED
#endif

// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
//   virtual void foo() OVERRIDE;
// Expands to the compiler's override-checking extension where one exists
// (MSVC and clang); a no-op elsewhere, so the annotation is advisory only.
#if defined(COMPILER_MSVC)
#define OVERRIDE override
#elif defined(__clang__)
#define OVERRIDE override
#else
#define OVERRIDE
#endif

// Annotate a function indicating the caller must examine the return value.
// Use like:
//   int foo() WARN_UNUSED_RESULT;
#if defined(COMPILER_GCC)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter.
// For v*printf functions (which take a va_list), pass 0 for dots_param.
// (This is undocumented but matches what the system C headers do.)
#if defined(COMPILER_GCC)
#define PRINTF_FORMAT(format_param, dots_param) \
    __attribute__((format(printf, format_param, dots_param)))
#else
#define PRINTF_FORMAT(format_param, dots_param)
#endif

// WPRINTF_FORMAT is the same, but for wide format strings.
// This doesn't appear to yet be implemented in any compiler.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
#define WPRINTF_FORMAT(format_param, dots_param)
// If available, it would look like:
//   __attribute__((format(wprintf, format_param, dots_param)))
| #endif // BASE_COMPILER_SPECIFIC_H_ | |||||
| @ -1,44 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/lazy_instance.h" | |||||
| #include "base/at_exit.h" | |||||
| #include "base/atomicops.h" | |||||
| #include "base/basictypes.h" | |||||
| #include "base/threading/platform_thread.h" | |||||
| #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |||||
| namespace base { | |||||
| bool LazyInstanceHelper::NeedsInstance() { | |||||
| // Try to create the instance, if we're the first, will go from EMPTY | |||||
| // to CREATING, otherwise we've already been beaten here. | |||||
| if (base::subtle::Acquire_CompareAndSwap( | |||||
| &state_, STATE_EMPTY, STATE_CREATING) == STATE_EMPTY) { | |||||
| // Caller must create instance | |||||
| return true; | |||||
| } else { | |||||
| // It's either in the process of being created, or already created. Spin. | |||||
| while (base::subtle::NoBarrier_Load(&state_) != STATE_CREATED) | |||||
| PlatformThread::YieldCurrentThread(); | |||||
| } | |||||
| // Someone else created the instance. | |||||
| return false; | |||||
| } | |||||
// Publishes a fully-constructed instance: records the happens-before edge
// for race detectors, flips state_ to STATE_CREATED with release semantics
// (so spinners in NeedsInstance() observe a completely built object), and,
// if |dtor| is non-null, schedules it to run on |instance| at process exit.
// The statement order here is the synchronization contract — do not reorder.
void LazyInstanceHelper::CompleteInstance(void* instance, void (*dtor)(void*)) {
  // See the comment to the corresponding HAPPENS_AFTER in Pointer().
  ANNOTATE_HAPPENS_BEFORE(&state_);

  // Instance is created, go from CREATING to CREATED.
  base::subtle::Release_Store(&state_, STATE_CREATED);

  // Make sure that the lazily instantiated object will get destroyed at exit.
  if (dtor)
    base::AtExitManager::RegisterCallback(dtor, instance);
}
| } // namespace base | |||||
| @ -1,177 +0,0 @@ | |||||
| // Copyright (c) 2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // The LazyInstance<Type, Traits> class manages a single instance of Type, | |||||
| // which will be lazily created on the first time it's accessed. This class is | |||||
| // useful for places you would normally use a function-level static, but you | |||||
| // need to have guaranteed thread-safety. The Type constructor will only ever | |||||
| // be called once, even if two threads are racing to create the object. Get() | |||||
| // and Pointer() will always return the same, completely initialized instance. | |||||
| // When the instance is constructed it is registered with AtExitManager. The | |||||
| // destructor will be called on program exit. | |||||
| // | |||||
| // LazyInstance is completely thread safe, assuming that you create it safely. | |||||
| // The class was designed to be POD initialized, so it shouldn't require a | |||||
| // static constructor. It really only makes sense to declare a LazyInstance as | |||||
| // a global variable using the base::LinkerInitialized constructor. | |||||
| // | |||||
| // LazyInstance is similar to Singleton, except it does not have the singleton | |||||
| // property. You can have multiple LazyInstance's of the same type, and each | |||||
| // will manage a unique instance. It also preallocates the space for Type, as | |||||
| // to avoid allocating the Type instance on the heap. This may help with the | |||||
| // performance of creating the instance, and reducing heap fragmentation. This | |||||
| // requires that Type be a complete type so we can determine the size. | |||||
| // | |||||
| // Example usage: | |||||
| // static LazyInstance<MyClass> my_instance(base::LINKER_INITIALIZED); | |||||
| // void SomeMethod() { | |||||
| // my_instance.Get().SomeMethod(); // MyClass::SomeMethod() | |||||
| // | |||||
| // MyClass* ptr = my_instance.Pointer(); | |||||
| // ptr->DoDoDo(); // MyClass::DoDoDo | |||||
| // } | |||||
| #ifndef BASE_LAZY_INSTANCE_H_ | |||||
| #define BASE_LAZY_INSTANCE_H_ | |||||
| #pragma once | |||||
| #include <new> // For placement new. | |||||
| #include "base/atomicops.h" | |||||
| #include "base/basictypes.h" | |||||
| #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |||||
| #include "base/threading/thread_restrictions.h" | |||||
| namespace base { | |||||
// Default traits for LazyInstance: constructs Type in caller-provided
// storage and destroys it in place at program exit.  The storage itself is
// owned by the LazyInstance (see |buf_| below); these traits never allocate.
template <typename Type>
struct DefaultLazyInstanceTraits {
  // Access from non-joinable threads is disallowed by default because the
  // registered destructor must run at exit.
  static const bool kAllowedToAccessOnNonjoinableThread = false;

  // Constructs a Type in |instance| (preallocated, suitably aligned storage)
  // and returns a pointer to the new object.
  static Type* New(void* instance) {
    // Use placement new to initialize our instance in our preallocated space.
    // The parenthesis is very important here to force POD type initialization.
    return new (instance) Type();
  }
  // Destroys the object previously created by New() without releasing the
  // storage.  static_cast (not reinterpret_cast) is the correct named cast
  // for converting void* back to the object's actual type.
  static void Delete(void* instance) {
    // Explicitly call the destructor.
    static_cast<Type*>(instance)->~Type();
  }
};
// Traits variant that intentionally leaks the instance: construction is
// identical to the default traits, but no destructor is ever registered.
template <typename Type>
struct LeakyLazyInstanceTraits {
  // Safe on non-joinable threads precisely because nothing runs at exit.
  static const bool kAllowedToAccessOnNonjoinableThread = true;

  static Type* New(void* instance) {
    return DefaultLazyInstanceTraits<Type>::New(instance);
  }
  // Rather than define an empty Delete function, we make Delete itself
  // a null pointer.  This allows us to completely sidestep registering
  // this object with an AtExitManager, which allows you to use
  // LeakyLazyInstanceTraits in contexts where you don't have an
  // AtExitManager.
  static void (*Delete)(void* instance);
};

// Out-of-line definition of the null Delete pointer declared above.
template <typename Type>
void (*LeakyLazyInstanceTraits<Type>::Delete)(void* instance) = NULL;
| // We pull out some of the functionality into a non-templated base, so that we | |||||
| // can implement the more complicated pieces out of line in the .cc file. | |||||
// Non-templated base of LazyInstance holding the creation state machine, so
// the more complicated pieces can live out of line in the .cc file.
class LazyInstanceHelper {
 protected:
  // Lifecycle of the managed instance; state_ moves EMPTY -> CREATING ->
  // CREATED (and back to EMPTY only via LazyInstance::OnExit at shutdown).
  enum {
    STATE_EMPTY    = 0,
    STATE_CREATING = 1,
    STATE_CREATED  = 2
  };

  // Intentionally empty: the object is designed to be POD/linker
  // initialized, so state_ starts at 0 (STATE_EMPTY) with no constructor
  // code running.
  explicit LazyInstanceHelper(LinkerInitialized /*unused*/) {/* state_ is 0 */}
  // Declaring a destructor (even if it's empty) will cause MSVC to register a
  // static initializer to register the empty destructor with atexit().

  // Check if instance needs to be created. If so return true otherwise
  // if another thread has beat us, wait for instance to be created and
  // return false.
  bool NeedsInstance();

  // After creating an instance, call this to register the dtor to be called
  // at program exit and to update the state to STATE_CREATED.
  void CompleteInstance(void* instance, void (*dtor)(void*));

  // Current creation state; accessed lock-free via base::subtle atomics.
  base::subtle::Atomic32 state_;

 private:
  DISALLOW_COPY_AND_ASSIGN(LazyInstanceHelper);
};
// Lazily creates a single Type in preallocated storage on first access.
// Thread-safe via the lock-free state machine in LazyInstanceHelper; see the
// file comment above for usage.
template <typename Type, typename Traits = DefaultLazyInstanceTraits<Type> >
class LazyInstance : public LazyInstanceHelper {
 public:
  explicit LazyInstance(LinkerInitialized x) : LazyInstanceHelper(x) { }
  // Declaring a destructor (even if it's empty) will cause MSVC to register a
  // static initializer to register the empty destructor with atexit().

  // Returns a reference to the (lazily created) instance.
  Type& Get() {
    return *Pointer();
  }

  // Returns a pointer to the instance, creating it on first call.  May spin
  // (yielding) if another thread is concurrently creating it.
  Type* Pointer() {
    if (!Traits::kAllowedToAccessOnNonjoinableThread)
      base::ThreadRestrictions::AssertSingletonAllowed();

    // We will hopefully have fast access when the instance is already created.
    if ((base::subtle::NoBarrier_Load(&state_) != STATE_CREATED) &&
        NeedsInstance()) {
      // Create the instance in the space provided by |buf_|.
      instance_ = Traits::New(buf_);

      // Traits::Delete will be null for LeakyLazyInstanceTraits
      void (*dtor)(void*) = Traits::Delete;
      CompleteInstance(this, (dtor == NULL) ? NULL : OnExit);
    }

    // This annotation helps race detectors recognize correct lock-less
    // synchronization between different threads calling Pointer().
    // We suggest dynamic race detection tool that "Traits::New" above
    // and CompleteInstance(...) happens before "return instance_" below.
    // See the corresponding HAPPENS_BEFORE in CompleteInstance(...).
    ANNOTATE_HAPPENS_AFTER(&state_);
    return instance_;
  }

  // Returns true if |p| "is" this instance for the current state: NULL while
  // empty, the raw buffer while creating, the constructed object once created.
  bool operator==(Type* p) {
    switch (base::subtle::NoBarrier_Load(&state_)) {
      case STATE_EMPTY:
        return p == NULL;
      case STATE_CREATING:
        return static_cast<int8*>(static_cast<void*>(p)) == buf_;
      case STATE_CREATED:
        return p == instance_;
      default:
        return false;
    }
  }

 private:
  // Adapter function for use with AtExit.  This should be called single
  // threaded, so don't use atomic operations.
  // Calling OnExit while the instance is in use by other threads is a mistake.
  static void OnExit(void* lazy_instance) {
    LazyInstance<Type, Traits>* me =
        reinterpret_cast<LazyInstance<Type, Traits>*>(lazy_instance);
    Traits::Delete(me->instance_);
    me->instance_ = NULL;
    base::subtle::Release_Store(&me->state_, STATE_EMPTY);
  }

  int8 buf_[sizeof(Type)];  // Preallocate the space for the Type instance.
  Type *instance_;          // Points into buf_ once constructed.

  DISALLOW_COPY_AND_ASSIGN(LazyInstance);
};
| } // namespace base | |||||
| #endif // BASE_LAZY_INSTANCE_H_ | |||||
| @ -1,55 +0,0 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_PORT_H_ | |||||
| #define BASE_PORT_H_ | |||||
| #pragma once | |||||
| #include <stdarg.h> | |||||
| #include "build/build_config.h" | |||||
// 64-bit integer literal suffixes: MSVC uses I64/UI64, everyone else LL/ULL.
#ifdef COMPILER_MSVC
#define GG_LONGLONG(x) x##I64
#define GG_ULONGLONG(x) x##UI64
#else
#define GG_LONGLONG(x) x##LL
#define GG_ULONGLONG(x) x##ULL
#endif

// Per C99 7.8.14, define __STDC_CONSTANT_MACROS before including <stdint.h>
// to get the INTn_C and UINTn_C macros for integer constants.  It's difficult
// to guarantee any specific ordering of header includes, so it's difficult to
// guarantee that the INTn_C macros can be defined by including <stdint.h> at
// any specific point.  Provide GG_INTn_C macros instead.
#define GG_INT8_C(x)    (x)
#define GG_INT16_C(x)   (x)
#define GG_INT32_C(x)   (x)
#define GG_INT64_C(x)   GG_LONGLONG(x)
#define GG_UINT8_C(x)   (x ## U)
#define GG_UINT16_C(x)  (x ## U)
#define GG_UINT32_C(x)  (x ## U)
#define GG_UINT64_C(x)  GG_ULONGLONG(x)

// It's possible for functions that use a va_list, such as StringPrintf, to
// invalidate the data in it upon use.  The fix is to make a copy of the
// structure before using it and use that copy instead.  va_copy is provided
// for this purpose.  MSVC does not provide va_copy, so define an
// implementation here.  It is not guaranteed that assignment is a copy, so the
// StringUtil.VariableArgsFunc unit test tests this capability.
#if defined(COMPILER_GCC)
#define GG_VA_COPY(a, b) (va_copy(a, b))
#elif defined(COMPILER_MSVC)
#define GG_VA_COPY(a, b) (a = b)
#endif

// Define an OS-neutral wrapper for shared library entry points; expands to
// the required calling convention on Windows and to nothing elsewhere.
#if defined(OS_WIN)
#define API_CALL __stdcall
#else
#define API_CALL
#endif
| #endif // BASE_PORT_H_ | |||||
| @ -1 +0,0 @@ | |||||
| // Empty File. | |||||
| @ -1,383 +1,24 @@ | |||||
| // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // Scopers help you manage ownership of a pointer, helping you easily manage the | |||||
| // a pointer within a scope, and automatically destroying the pointer at the | |||||
| // end of a scope. There are two main classes you will use, which correspond | |||||
| // to the operators new/delete and new[]/delete[]. | |||||
| // | |||||
| // Example usage (scoped_ptr): | |||||
| // { | |||||
| // scoped_ptr<Foo> foo(new Foo("wee")); | |||||
| // } // foo goes out of scope, releasing the pointer with it. | |||||
| // Copyright (C) 2011 Google Inc. | |||||
| // | // | ||||
| // { | |||||
| // scoped_ptr<Foo> foo; // No pointer managed. | |||||
| // foo.reset(new Foo("wee")); // Now a pointer is managed. | |||||
| // foo.reset(new Foo("wee2")); // Foo("wee") was destroyed. | |||||
| // foo.reset(new Foo("wee3")); // Foo("wee2") was destroyed. | |||||
| // foo->Method(); // Foo::Method() called. | |||||
| // foo.get()->Method(); // Foo::Method() called. | |||||
| // SomeFunc(foo.release()); // SomeFunc takes ownership, foo no longer | |||||
| // // manages a pointer. | |||||
| // foo.reset(new Foo("wee4")); // foo manages a pointer again. | |||||
| // foo.reset(); // Foo("wee4") destroyed, foo no longer | |||||
| // // manages a pointer. | |||||
| // } // foo wasn't managing a pointer, so nothing was destroyed. | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| // you may not use this file except in compliance with the License. | |||||
| // You may obtain a copy of the License at | |||||
| // | // | ||||
| // Example usage (scoped_array): | |||||
| // { | |||||
| // scoped_array<Foo> foo(new Foo[100]); | |||||
| // foo.get()->Method(); // Foo::Method on the 0th element. | |||||
| // foo[10].Method(); // Foo::Method on the 10th element. | |||||
| // } | |||||
| #ifndef BASE_SCOPED_PTR_H_ | |||||
| #define BASE_SCOPED_PTR_H_ | |||||
| #pragma once | |||||
| // This is an implementation designed to match the anticipated future TR2 | |||||
| // implementation of the scoped_ptr class, and its closely-related brethren, | |||||
| // scoped_array, scoped_ptr_malloc. | |||||
| #include <assert.h> | |||||
| #include <stddef.h> | |||||
| #include <stdlib.h> | |||||
| #include "base/compiler_specific.h" | |||||
| // A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T> | |||||
| // automatically deletes the pointer it holds (if any). | |||||
| // That is, scoped_ptr<T> owns the T object that it points to. | |||||
| // Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object. | |||||
| // Also like T*, scoped_ptr<T> is thread-compatible, and once you | |||||
| // dereference it, you get the threadsafety guarantees of T. | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | // | ||||
| // The size of a scoped_ptr is small: | |||||
| // sizeof(scoped_ptr<C>) == sizeof(C*) | |||||
| template <class C> | |||||
| class scoped_ptr { | |||||
| public: | |||||
| // The element type | |||||
| typedef C element_type; | |||||
| // Constructor. Defaults to initializing with NULL. | |||||
| // There is no way to create an uninitialized scoped_ptr. | |||||
| // The input parameter must be allocated with new. | |||||
| explicit scoped_ptr(C* p = NULL) : ptr_(p) { } | |||||
| // Destructor. If there is a C object, delete it. | |||||
| // We don't need to test ptr_ == NULL because C++ does that for us. | |||||
| ~scoped_ptr() { | |||||
| enum { type_must_be_complete = sizeof(C) }; | |||||
| delete ptr_; | |||||
| } | |||||
| // Reset. Deletes the current owned object, if any. | |||||
| // Then takes ownership of a new object, if given. | |||||
| // this->reset(this->get()) works. | |||||
| void reset(C* p = NULL) { | |||||
| if (p != ptr_) { | |||||
| enum { type_must_be_complete = sizeof(C) }; | |||||
| delete ptr_; | |||||
| ptr_ = p; | |||||
| } | |||||
| } | |||||
| // Accessors to get the owned object. | |||||
| // operator* and operator-> will assert() if there is no current object. | |||||
| C& operator*() const { | |||||
| assert(ptr_ != NULL); | |||||
| return *ptr_; | |||||
| } | |||||
| C* operator->() const { | |||||
| assert(ptr_ != NULL); | |||||
| return ptr_; | |||||
| } | |||||
| C* get() const { return ptr_; } | |||||
| // Comparison operators. | |||||
| // These return whether two scoped_ptr refer to the same object, not just to | |||||
| // two different but equal objects. | |||||
| bool operator==(C* p) const { return ptr_ == p; } | |||||
| bool operator!=(C* p) const { return ptr_ != p; } | |||||
| // Swap two scoped pointers. | |||||
| void swap(scoped_ptr& p2) { | |||||
| C* tmp = ptr_; | |||||
| ptr_ = p2.ptr_; | |||||
| p2.ptr_ = tmp; | |||||
| } | |||||
| // Release a pointer. | |||||
| // The return value is the current pointer held by this object. | |||||
| // If this object holds a NULL pointer, the return value is NULL. | |||||
| // After this operation, this object will hold a NULL pointer, | |||||
| // and will not own the object any more. | |||||
| C* release() WARN_UNUSED_RESULT { | |||||
| C* retVal = ptr_; | |||||
| ptr_ = NULL; | |||||
| return retVal; | |||||
| } | |||||
| private: | |||||
| C* ptr_; | |||||
| // Forbid comparison of scoped_ptr types. If C2 != C, it totally doesn't | |||||
| // make sense, and if C2 == C, it still doesn't make sense because you should | |||||
| // never have the same object owned by two different scoped_ptrs. | |||||
| template <class C2> bool operator==(scoped_ptr<C2> const& p2) const; | |||||
| template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const; | |||||
| // Disallow evil constructors | |||||
| scoped_ptr(const scoped_ptr&); | |||||
| void operator=(const scoped_ptr&); | |||||
| }; | |||||
| // Free functions | |||||
| template <class C> | |||||
| void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) { | |||||
| p1.swap(p2); | |||||
| } | |||||
| template <class C> | |||||
| bool operator==(C* p1, const scoped_ptr<C>& p2) { | |||||
| return p1 == p2.get(); | |||||
| } | |||||
| template <class C> | |||||
| bool operator!=(C* p1, const scoped_ptr<C>& p2) { | |||||
| return p1 != p2.get(); | |||||
| } | |||||
| // scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate | |||||
| // with new [] and the destructor deletes objects with delete []. | |||||
| // | |||||
| // As with scoped_ptr<C>, a scoped_array<C> either points to an object | |||||
| // or is NULL. A scoped_array<C> owns the object that it points to. | |||||
| // scoped_array<T> is thread-compatible, and once you index into it, | |||||
| // the returned objects have only the threadsafety guarantees of T. | |||||
| // | |||||
| // Size: sizeof(scoped_array<C>) == sizeof(C*) | |||||
| template <class C> | |||||
| class scoped_array { | |||||
| public: | |||||
| // The element type | |||||
| typedef C element_type; | |||||
| // Constructor. Defaults to intializing with NULL. | |||||
| // There is no way to create an uninitialized scoped_array. | |||||
| // The input parameter must be allocated with new []. | |||||
| explicit scoped_array(C* p = NULL) : array_(p) { } | |||||
| // Destructor. If there is a C object, delete it. | |||||
| // We don't need to test ptr_ == NULL because C++ does that for us. | |||||
| ~scoped_array() { | |||||
| enum { type_must_be_complete = sizeof(C) }; | |||||
| delete[] array_; | |||||
| } | |||||
| // Reset. Deletes the current owned object, if any. | |||||
| // Then takes ownership of a new object, if given. | |||||
| // this->reset(this->get()) works. | |||||
| void reset(C* p = NULL) { | |||||
| if (p != array_) { | |||||
| enum { type_must_be_complete = sizeof(C) }; | |||||
| delete[] array_; | |||||
| array_ = p; | |||||
| } | |||||
| } | |||||
| // Get one element of the current object. | |||||
| // Will assert() if there is no current object, or index i is negative. | |||||
| C& operator[](ptrdiff_t i) const { | |||||
| assert(i >= 0); | |||||
| assert(array_ != NULL); | |||||
| return array_[i]; | |||||
| } | |||||
| // Get a pointer to the zeroth element of the current object. | |||||
| // If there is no current object, return NULL. | |||||
| C* get() const { | |||||
| return array_; | |||||
| } | |||||
| // Comparison operators. | |||||
| // These return whether two scoped_array refer to the same object, not just to | |||||
| // two different but equal objects. | |||||
| bool operator==(C* p) const { return array_ == p; } | |||||
| bool operator!=(C* p) const { return array_ != p; } | |||||
| // Swap two scoped arrays. | |||||
| void swap(scoped_array& p2) { | |||||
| C* tmp = array_; | |||||
| array_ = p2.array_; | |||||
| p2.array_ = tmp; | |||||
| } | |||||
| // Release an array. | |||||
| // The return value is the current pointer held by this object. | |||||
| // If this object holds a NULL pointer, the return value is NULL. | |||||
| // After this operation, this object will hold a NULL pointer, | |||||
| // and will not own the object any more. | |||||
| C* release() WARN_UNUSED_RESULT { | |||||
| C* retVal = array_; | |||||
| array_ = NULL; | |||||
| return retVal; | |||||
| } | |||||
| private: | |||||
| C* array_; | |||||
| // Forbid comparison of different scoped_array types. | |||||
| template <class C2> bool operator==(scoped_array<C2> const& p2) const; | |||||
| template <class C2> bool operator!=(scoped_array<C2> const& p2) const; | |||||
| // Disallow evil constructors | |||||
| scoped_array(const scoped_array&); | |||||
| void operator=(const scoped_array&); | |||||
| }; | |||||
| // Free functions | |||||
| template <class C> | |||||
| void swap(scoped_array<C>& p1, scoped_array<C>& p2) { | |||||
| p1.swap(p2); | |||||
| } | |||||
| template <class C> | |||||
| bool operator==(C* p1, const scoped_array<C>& p2) { | |||||
| return p1 == p2.get(); | |||||
| } | |||||
| template <class C> | |||||
| bool operator!=(C* p1, const scoped_array<C>& p2) { | |||||
| return p1 != p2.get(); | |||||
| } | |||||
| // This class wraps the c library function free() in a class that can be | |||||
| // passed as a template argument to scoped_ptr_malloc below. | |||||
| class ScopedPtrMallocFree { | |||||
| public: | |||||
| inline void operator()(void* x) const { | |||||
| free(x); | |||||
| } | |||||
| }; | |||||
| // scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a | |||||
| // second template argument, the functor used to free the object. | |||||
| template<class C, class FreeProc = ScopedPtrMallocFree> | |||||
| class scoped_ptr_malloc { | |||||
| public: | |||||
| // The element type | |||||
| typedef C element_type; | |||||
| // Constructor. Defaults to initializing with NULL. | |||||
| // There is no way to create an uninitialized scoped_ptr. | |||||
| // The input parameter must be allocated with an allocator that matches the | |||||
| // Free functor. For the default Free functor, this is malloc, calloc, or | |||||
| // realloc. | |||||
| explicit scoped_ptr_malloc(C* p = NULL): ptr_(p) {} | |||||
| // Destructor. If there is a C object, call the Free functor. | |||||
| ~scoped_ptr_malloc() { | |||||
| free_(ptr_); | |||||
| } | |||||
| // Reset. Calls the Free functor on the current owned object, if any. | |||||
| // Then takes ownership of a new object, if given. | |||||
| // this->reset(this->get()) works. | |||||
| void reset(C* p = NULL) { | |||||
| if (ptr_ != p) { | |||||
| free_(ptr_); | |||||
| ptr_ = p; | |||||
| } | |||||
| } | |||||
| // Get the current object. | |||||
| // operator* and operator-> will cause an assert() failure if there is | |||||
| // no current object. | |||||
| C& operator*() const { | |||||
| assert(ptr_ != NULL); | |||||
| return *ptr_; | |||||
| } | |||||
| C* operator->() const { | |||||
| assert(ptr_ != NULL); | |||||
| return ptr_; | |||||
| } | |||||
| C* get() const { | |||||
| return ptr_; | |||||
| } | |||||
| // Comparison operators. | |||||
| // These return whether a scoped_ptr_malloc and a plain pointer refer | |||||
| // to the same object, not just to two different but equal objects. | |||||
| // For compatibility with the boost-derived implementation, these | |||||
| // take non-const arguments. | |||||
| bool operator==(C* p) const { | |||||
| return ptr_ == p; | |||||
| } | |||||
| bool operator!=(C* p) const { | |||||
| return ptr_ != p; | |||||
| } | |||||
| // Swap two scoped pointers. | |||||
| void swap(scoped_ptr_malloc & b) { | |||||
| C* tmp = b.ptr_; | |||||
| b.ptr_ = ptr_; | |||||
| ptr_ = tmp; | |||||
| } | |||||
| // Release a pointer. | |||||
| // The return value is the current pointer held by this object. | |||||
| // If this object holds a NULL pointer, the return value is NULL. | |||||
| // After this operation, this object will hold a NULL pointer, | |||||
| // and will not own the object any more. | |||||
| C* release() WARN_UNUSED_RESULT { | |||||
| C* tmp = ptr_; | |||||
| ptr_ = NULL; | |||||
| return tmp; | |||||
| } | |||||
| private: | |||||
| C* ptr_; | |||||
| // no reason to use these: each scoped_ptr_malloc should have its own object | |||||
| template <class C2, class GP> | |||||
| bool operator==(scoped_ptr_malloc<C2, GP> const& p) const; | |||||
| template <class C2, class GP> | |||||
| bool operator!=(scoped_ptr_malloc<C2, GP> const& p) const; | |||||
| static FreeProc const free_; | |||||
| // Disallow evil constructors | |||||
| scoped_ptr_malloc(const scoped_ptr_malloc&); | |||||
| void operator=(const scoped_ptr_malloc&); | |||||
| }; | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| // See the License for the specific language governing permissions and | |||||
| // limitations under the License. | |||||
| template<class C, class FP> | |||||
| FP const scoped_ptr_malloc<C, FP>::free_ = FP(); | |||||
| // Author: Philippe Liard | |||||
| template<class C, class FP> inline | |||||
| void swap(scoped_ptr_malloc<C, FP>& a, scoped_ptr_malloc<C, FP>& b) { | |||||
| a.swap(b); | |||||
| } | |||||
| #ifndef I18N_PHONENUMBERS_BASE_SCOPED_PTR_H_ | |||||
| #define I18N_PHONENUMBERS_BASE_SCOPED_PTR_H_ | |||||
| template<class C, class FP> inline | |||||
| bool operator==(C* p, const scoped_ptr_malloc<C, FP>& b) { | |||||
| return p == b.get(); | |||||
| } | |||||
| #include <boost/scoped_ptr.hpp> | |||||
| template<class C, class FP> inline | |||||
| bool operator!=(C* p, const scoped_ptr_malloc<C, FP>& b) { | |||||
| return p != b.get(); | |||||
| } | |||||
| using boost::scoped_ptr; | |||||
| #endif // BASE_SCOPED_PTR_H_ | |||||
| #endif // I18N_PHONENUMBERS_BASE_SCOPED_PTR_H_ | |||||
| @ -1,271 +1,52 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_SINGLETON_H_ | |||||
| #define BASE_SINGLETON_H_ | |||||
| #pragma once | |||||
| #include "base/at_exit.h" | |||||
| #include "base/atomicops.h" | |||||
| #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |||||
| #include "base/threading/platform_thread.h" | |||||
| #include "base/threading/thread_restrictions.h" | |||||
| // Default traits for Singleton<Type>. Calls operator new and operator delete on | |||||
| // the object. Registers automatic deletion at process exit. | |||||
| // Overload if you need arguments or another memory allocation function. | |||||
| template<typename Type> | |||||
| struct DefaultSingletonTraits { | |||||
| // Allocates the object. | |||||
| static Type* New() { | |||||
| // The parenthesis is very important here; it forces POD type | |||||
| // initialization. | |||||
| return new Type(); | |||||
| } | |||||
| // Destroys the object. | |||||
| static void Delete(Type* x) { | |||||
| delete x; | |||||
| } | |||||
| // Set to true to automatically register deletion of the object on process | |||||
| // exit. See below for the required call that makes this happen. | |||||
| static const bool kRegisterAtExit = true; | |||||
| // Set to false to disallow access on a non-joinable thread. This is | |||||
| // different from kRegisterAtExit because StaticMemorySingletonTraits allows | |||||
| // access on non-joinable threads, and gracefully handles this. | |||||
| static const bool kAllowedToAccessOnNonjoinableThread = false; | |||||
| }; | |||||
| // Alternate traits for use with the Singleton<Type>. Identical to | |||||
| // DefaultSingletonTraits except that the Singleton will not be cleaned up | |||||
| // at exit. | |||||
| template<typename Type> | |||||
| struct LeakySingletonTraits : public DefaultSingletonTraits<Type> { | |||||
| static const bool kRegisterAtExit = false; | |||||
| static const bool kAllowedToAccessOnNonjoinableThread = true; | |||||
| }; | |||||
| // Alternate traits for use with the Singleton<Type>. Allocates memory | |||||
| // for the singleton instance from a static buffer. The singleton will | |||||
| // be cleaned up at exit, but can't be revived after destruction unless | |||||
| // the Resurrect() method is called. | |||||
| // Copyright (C) 2011 Google Inc. | |||||
| // | // | ||||
| // This is useful for a certain category of things, notably logging and | |||||
| // tracing, where the singleton instance is of a type carefully constructed to | |||||
| // be safe to access post-destruction. | |||||
| // In logging and tracing you'll typically get stray calls at odd times, like | |||||
| // during static destruction, thread teardown and the like, and there's a | |||||
| // termination race on the heap-based singleton - e.g. if one thread calls | |||||
| // get(), but then another thread initiates AtExit processing, the first thread | |||||
| // may call into an object residing in unallocated memory. If the instance is | |||||
| // allocated from the data segment, then this is survivable. | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| // you may not use this file except in compliance with the License. | |||||
| // You may obtain a copy of the License at | |||||
| // | // | ||||
| // The destructor is to deallocate system resources, in this case to unregister | |||||
| // a callback the system will invoke when logging levels change. Note that | |||||
| // this is also used in e.g. Chrome Frame, where you have to allow for the | |||||
| // possibility of loading briefly into someone else's process space, and | |||||
| // so leaking is not an option, as that would sabotage the state of your host | |||||
| // process once you've unloaded. | |||||
| template <typename Type> | |||||
| struct StaticMemorySingletonTraits { | |||||
| // WARNING: User has to deal with get() in the singleton class | |||||
| // this is traits for returning NULL. | |||||
| static Type* New() { | |||||
| if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1)) | |||||
| return NULL; | |||||
| Type* ptr = reinterpret_cast<Type*>(buffer_); | |||||
| // We are protected by a memory barrier. | |||||
| new(ptr) Type(); | |||||
| return ptr; | |||||
| } | |||||
| static void Delete(Type* p) { | |||||
| base::subtle::NoBarrier_Store(&dead_, 1); | |||||
| base::subtle::MemoryBarrier(); | |||||
| if (p != NULL) | |||||
| p->Type::~Type(); | |||||
| } | |||||
| static const bool kRegisterAtExit = true; | |||||
| static const bool kAllowedToAccessOnNonjoinableThread = true; | |||||
| // Exposed for unittesting. | |||||
| static void Resurrect() { | |||||
| base::subtle::NoBarrier_Store(&dead_, 0); | |||||
| } | |||||
| private: | |||||
| static const size_t kBufferSize = (sizeof(Type) + | |||||
| sizeof(intptr_t) - 1) / sizeof(intptr_t); | |||||
| static intptr_t buffer_[kBufferSize]; | |||||
| // Signal the object was already deleted, so it is not revived. | |||||
| static base::subtle::Atomic32 dead_; | |||||
| }; | |||||
| template <typename Type> intptr_t | |||||
| StaticMemorySingletonTraits<Type>::buffer_[kBufferSize]; | |||||
| template <typename Type> base::subtle::Atomic32 | |||||
| StaticMemorySingletonTraits<Type>::dead_ = 0; | |||||
| // The Singleton<Type, Traits, DifferentiatingType> class manages a single | |||||
| // instance of Type which will be created on first use and will be destroyed at | |||||
| // normal process exit). The Trait::Delete function will not be called on | |||||
| // abnormal process exit. | |||||
| // | |||||
| // DifferentiatingType is used as a key to differentiate two different | |||||
| // singletons having the same memory allocation functions but serving a | |||||
| // different purpose. This is mainly used for Locks serving different purposes. | |||||
| // | |||||
| // Example usage: | |||||
| // | |||||
| // In your header: | |||||
| // #include "base/singleton.h" | |||||
| // class FooClass { | |||||
| // public: | |||||
| // static FooClass* GetInstance(); <-- See comment below on this. | |||||
| // void Bar() { ... } | |||||
| // private: | |||||
| // FooClass() { ... } | |||||
| // friend struct DefaultSingletonTraits<FooClass>; | |||||
| // | |||||
| // DISALLOW_COPY_AND_ASSIGN(FooClass); | |||||
| // }; | |||||
| // | |||||
| // In your source file: | |||||
| // FooClass* FooClass::GetInstance() { | |||||
| // return Singleton<FooClass>::get(); | |||||
| // } | |||||
| // | |||||
| // And to call methods on FooClass: | |||||
| // FooClass::GetInstance()->Bar(); | |||||
| // | |||||
| // NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance | |||||
| // and it is important that FooClass::GetInstance() is not inlined in the | |||||
| // header. This makes sure that when source files from multiple targets include | |||||
| // this header they don't end up with different copies of the inlined code | |||||
| // creating multiple copies of the singleton. | |||||
| // | |||||
| // Singleton<> has no non-static members and doesn't need to actually be | |||||
| // instantiated. | |||||
| // | |||||
| // This class is itself thread-safe. The underlying Type must of course be | |||||
| // thread-safe if you want to use it concurrently. Two parameters may be tuned | |||||
| // depending on the user's requirements. | |||||
| // | |||||
| // Glossary: | |||||
| // RAE = kRegisterAtExit | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | // | ||||
| // On every platform, if Traits::RAE is true, the singleton will be destroyed at | |||||
| // process exit. More precisely it uses base::AtExitManager which requires an | |||||
| // object of this type to be instantiated. AtExitManager mimics the semantics | |||||
| // of atexit() such as LIFO order but under Windows is safer to call. For more | |||||
| // information see at_exit.h. | |||||
| // | |||||
| // If Traits::RAE is false, the singleton will not be freed at process exit, | |||||
| // thus the singleton will be leaked if it is ever accessed. Traits::RAE | |||||
| // shouldn't be false unless absolutely necessary. Remember that the heap where | |||||
| // the object is allocated may be destroyed by the CRT anyway. | |||||
| // | |||||
| // Caveats: | |||||
| // (a) Every call to get(), operator->() and operator*() incurs some overhead | |||||
| // (16ns on my P4/2.8GHz) to check whether the object has already been | |||||
| // initialized. You may wish to cache the result of get(); it will not | |||||
| // change. | |||||
| // | |||||
| // (b) Your factory function must never throw an exception. This class is not | |||||
| // exception-safe. | |||||
| // | |||||
| template <typename Type, | |||||
| typename Traits = DefaultSingletonTraits<Type>, | |||||
| typename DifferentiatingType = Type> | |||||
| class Singleton { | |||||
| private: | |||||
| // Classes using the Singleton<T> pattern should declare a GetInstance() | |||||
| // method and call Singleton::get() from within that. | |||||
| friend Type* Type::GetInstance(); | |||||
| // This class is safe to be constructed and copy-constructed since it has no | |||||
| // member. | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| // See the License for the specific language governing permissions and | |||||
| // limitations under the License. | |||||
| // Return a pointer to the one true instance of the class. | |||||
| static Type* get() { | |||||
| if (!Traits::kAllowedToAccessOnNonjoinableThread) | |||||
| base::ThreadRestrictions::AssertSingletonAllowed(); | |||||
| // Author: Philippe Liard | |||||
| // Our AtomicWord doubles as a spinlock, where a value of | |||||
| // kBeingCreatedMarker means the spinlock is being held for creation. | |||||
| static const base::subtle::AtomicWord kBeingCreatedMarker = 1; | |||||
| #ifndef I18N_PHONENUMBERS_BASE_SINGLETON_H_ | |||||
| #define I18N_PHONENUMBERS_BASE_SINGLETON_H_ | |||||
| base::subtle::AtomicWord value = base::subtle::NoBarrier_Load(&instance_); | |||||
| if (value != 0 && value != kBeingCreatedMarker) { | |||||
| // See the corresponding HAPPENS_BEFORE below. | |||||
| ANNOTATE_HAPPENS_AFTER(&instance_); | |||||
| return reinterpret_cast<Type*>(value); | |||||
| } | |||||
| #include <boost/scoped_ptr.hpp> | |||||
| #include <boost/thread/once.hpp> | |||||
| #include <boost/utility.hpp> | |||||
| // Object isn't created yet, maybe we will get to create it, let's try... | |||||
| if (base::subtle::Acquire_CompareAndSwap(&instance_, | |||||
| 0, | |||||
| kBeingCreatedMarker) == 0) { | |||||
| // instance_ was NULL and is now kBeingCreatedMarker. Only one thread | |||||
| // will ever get here. Threads might be spinning on us, and they will | |||||
| // stop right after we do this store. | |||||
| Type* newval = Traits::New(); | |||||
| namespace i18n { | |||||
| namespace phonenumbers { | |||||
| // This annotation helps race detectors recognize correct lock-less | |||||
| // synchronization between different threads calling get(). | |||||
| // See the corresponding HAPPENS_AFTER below and above. | |||||
| ANNOTATE_HAPPENS_BEFORE(&instance_); | |||||
| base::subtle::Release_Store( | |||||
| &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval)); | |||||
| template <class T> | |||||
| class Singleton : private boost::noncopyable { | |||||
| public: | |||||
| virtual ~Singleton() {} | |||||
| if (newval != NULL && Traits::kRegisterAtExit) | |||||
| base::AtExitManager::RegisterCallback(OnExit, NULL); | |||||
| return newval; | |||||
| } | |||||
| // We hit a race. Another thread beat us and either: | |||||
| // - Has the object in BeingCreated state | |||||
| // - Already has the object created... | |||||
| // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr. | |||||
| // Unless your constructor can be very time consuming, it is very unlikely | |||||
| // to hit this race. When it does, we just spin and yield the thread until | |||||
| // the object has been created. | |||||
| while (true) { | |||||
| value = base::subtle::NoBarrier_Load(&instance_); | |||||
| if (value != kBeingCreatedMarker) | |||||
| break; | |||||
| base::PlatformThread::YieldCurrentThread(); | |||||
| } | |||||
| // See the corresponding HAPPENS_BEFORE above. | |||||
| ANNOTATE_HAPPENS_AFTER(&instance_); | |||||
| return reinterpret_cast<Type*>(value); | |||||
| static T* GetInstance() { | |||||
| boost::call_once(Init, flag); | |||||
| return instance; | |||||
| } | } | ||||
| // Adapter function for use with AtExit(). This should be called single | |||||
| // threaded, so don't use atomic operations. | |||||
| // Calling OnExit while singleton is in use by other threads is a mistake. | |||||
| static void OnExit(void* /*unused*/) { | |||||
| // AtExit should only ever be register after the singleton instance was | |||||
| // created. We should only ever get here with a valid instance_ pointer. | |||||
| Traits::Delete( | |||||
| reinterpret_cast<Type*>(base::subtle::NoBarrier_Load(&instance_))); | |||||
| instance_ = 0; | |||||
| private: | |||||
| static void Init() { | |||||
| instance.reset(new T()); | |||||
| } | } | ||||
| static base::subtle::AtomicWord instance_; | |||||
| static boost::scoped_ptr<T> instance; | |||||
| static boost::once_flag flag; | |||||
| }; | }; | ||||
| template <typename Type, typename Traits, typename DifferentiatingType> | |||||
| base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>:: | |||||
| instance_ = 0; | |||||
| template <class T> boost::scoped_ptr<T> Singleton<T>::instance = NULL; | |||||
| template <class T> boost::once_flag Singleton<T>::flag = BOOST_ONCE_INIT; | |||||
| } // namespace phonenumbers | |||||
| } // namespace i18n | |||||
| #endif // BASE_SINGLETON_H_ | |||||
| #endif // I18N_PHONENUMBERS_BASE_SINGLETON_H_ | |||||
| @ -1,41 +0,0 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // This file is used for debugging assertion support. The Lock class | |||||
| // is functionally a wrapper around the LockImpl class, so the only | |||||
| // real intelligence in the class is in the debugging logic. | |||||
| #if !defined(NDEBUG) | |||||
| #include "base/synchronization/lock.h" | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| Lock::Lock() : lock_() { | |||||
| owned_by_thread_ = false; | |||||
| owning_thread_id_ = static_cast<PlatformThreadId>(0); | |||||
| } | |||||
| void Lock::AssertAcquired() const { | |||||
| DCHECK(owned_by_thread_); | |||||
| DCHECK_EQ(owning_thread_id_, PlatformThread::CurrentId()); | |||||
| } | |||||
| void Lock::CheckHeldAndUnmark() { | |||||
| DCHECK(owned_by_thread_); | |||||
| DCHECK_EQ(owning_thread_id_, PlatformThread::CurrentId()); | |||||
| owned_by_thread_ = false; | |||||
| owning_thread_id_ = static_cast<PlatformThreadId>(0); | |||||
| } | |||||
| void Lock::CheckUnheldAndMark() { | |||||
| DCHECK(!owned_by_thread_); | |||||
| owned_by_thread_ = true; | |||||
| owning_thread_id_ = PlatformThread::CurrentId(); | |||||
| } | |||||
| } // namespace base | |||||
| #endif // NDEBUG | |||||
| @ -1,131 +1,27 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| #define BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| #pragma once | |||||
| #include "base/synchronization/lock_impl.h" | |||||
| #include "base/threading/platform_thread.h" | |||||
| // Copyright (C) 2011 Google Inc. | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| // you may not use this file except in compliance with the License. | |||||
| // You may obtain a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| // See the License for the specific language governing permissions and | |||||
| // limitations under the License. | |||||
| // Author: Philippe Liard | |||||
| #ifndef I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| #define I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| #include <boost/thread/mutex.hpp> | |||||
namespace base {

// Map the Chromium-style synchronization names onto Boost primitives:
// base::Lock is a plain mutex and base::AutoLock is its scoped RAII guard,
// matching how the rest of this codebase uses those two names.
typedef boost::mutex Lock;
typedef boost::mutex::scoped_lock AutoLock;

}
// A convenient wrapper for an OS specific critical section.  The only real
// intelligence in this class is in debug mode for the support of the
// AssertAcquired() method: release builds delegate straight to the
// underlying LockImpl, debug builds additionally track which thread owns
// the lock.
class Lock {
 public:
#if defined(NDEBUG)  // Optimized wrapper implementation.
  Lock() : lock_() {}
  ~Lock() {}

  // Blocks until the lock is available, then takes it.
  void Acquire() { lock_.Lock(); }

  // Releases the lock; must only be called by the holding thread.
  void Release() { lock_.Unlock(); }

  // If the lock is not held, take it and return true. If the lock is already
  // held by another thread, immediately return false. This must not be called
  // by a thread already holding the lock (what happens is undefined and an
  // assertion may fail).
  bool Try() { return lock_.Try(); }

  // Null implementation if not debug.
  void AssertAcquired() const {}
#else
  Lock();
  ~Lock() {}

  // NOTE: Although windows critical sections support recursive locks, we do not
  // allow this, and we will commonly fire a DCHECK() if a thread attempts to
  // acquire the lock a second time (while already holding it).
  void Acquire() {
    lock_.Lock();
    CheckUnheldAndMark();
  }
  void Release() {
    CheckHeldAndUnmark();
    lock_.Unlock();
  }

  // Same contract as the NDEBUG variant; additionally records this thread as
  // the owner when the lock is taken.
  bool Try() {
    bool rv = lock_.Try();
    if (rv) {
      CheckUnheldAndMark();
    }
    return rv;
  }

  // DCHECKs that the calling thread holds the lock.
  void AssertAcquired() const;
#endif  // NDEBUG

#if defined(OS_POSIX)
  // The posix implementation of ConditionVariable needs to be able
  // to see our lock and tweak our debugging counters, as it releases
  // and acquires locks inside of pthread_cond_{timed,}wait.
  // Windows doesn't need to do this as it calls the Lock::* methods.
  friend class ConditionVariable;
#endif

 private:
#if !defined(NDEBUG)
  // Members and routines taking care of locks assertions.
  // Note that this checks for recursive locks and allows them
  // if the variable is set. This is allowed by the underlying implementation
  // on windows but not on Posix, so we're doing unneeded checks on Posix.
  // It's worth it to share the code.
  void CheckHeldAndUnmark();
  void CheckUnheldAndMark();

  // All private data is implicitly protected by lock_.
  // Be VERY careful to only access members under that lock.

  // Determines validity of owning_thread_id_. Needed as we don't have
  // a null owning_thread_id_ value.
  bool owned_by_thread_;
  base::PlatformThreadId owning_thread_id_;
#endif  // NDEBUG

  // Platform specific underlying lock implementation.
  internal::LockImpl lock_;

  DISALLOW_COPY_AND_ASSIGN(Lock);
};
// A helper class that acquires the given Lock while the AutoLock is in scope.
class AutoLock {
 public:
  // Blocks until |lock| is acquired; the lock is then held for the lifetime
  // of this object.
  explicit AutoLock(Lock& lock) : lock_(lock) {
    lock_.Acquire();
  }

  // Verifies (debug builds only) that this thread still holds the lock,
  // then releases it.
  ~AutoLock() {
    lock_.AssertAcquired();
    lock_.Release();
  }

 private:
  Lock& lock_;  // The guarded lock; referenced, not owned.

  DISALLOW_COPY_AND_ASSIGN(AutoLock);
};
// AutoUnlock is a helper that will Release() the |lock| argument in the
// constructor, and re-Acquire() it in the destructor: the inverse of
// AutoLock, used to temporarily drop a lock inside a held region.
class AutoUnlock {
 public:
  explicit AutoUnlock(Lock& lock) : lock_(lock) {
    // We require our caller to have the lock.
    lock_.AssertAcquired();
    lock_.Release();
  }

  // Blocks until the lock is re-acquired.
  ~AutoUnlock() {
    lock_.Acquire();
  }

 private:
  Lock& lock_;  // The temporarily-released lock; referenced, not owned.

  DISALLOW_COPY_AND_ASSIGN(AutoUnlock);
};
| } // namespace base | |||||
| #endif // BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| #endif // I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_H_ | |||||
| @ -1,63 +0,0 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_ | |||||
| #define BASE_SYNCHRONIZATION_LOCK_IMPL_H_ | |||||
| #pragma once | |||||
| #include "build/build_config.h" | |||||
| #if defined(OS_WIN) | |||||
| #include <windows.h> | |||||
| #elif defined(OS_POSIX) | |||||
| #include <pthread.h> | |||||
| #endif | |||||
| #include "base/basictypes.h" | |||||
| namespace base { | |||||
| namespace internal { | |||||
// This class implements the underlying platform-specific lock mechanism
// (a pthread mutex on POSIX, a CRITICAL_SECTION on Windows) used for the
// Lock class.  Most users should not use LockImpl directly, but should
// instead use Lock.
class LockImpl {
 public:
#if defined(OS_WIN)
  typedef CRITICAL_SECTION OSLockType;
#elif defined(OS_POSIX)
  typedef pthread_mutex_t OSLockType;
#endif

  LockImpl();
  ~LockImpl();

  // If the lock is not held, take it and return true. If the lock is already
  // held by something else, immediately return false.
  bool Try();

  // Take the lock, blocking until it is available if necessary.
  void Lock();

  // Release the lock. This must only be called by the lock's holder: after
  // a successful call to Try, or a call to Lock.
  void Unlock();

  // Return the native underlying lock. Not supported for Windows builds.
  // TODO(awalker): refactor lock and condition variables so that this is
  // unnecessary.
#if !defined(OS_WIN)
  OSLockType* os_lock() { return &os_lock_; }
#endif

 private:
  OSLockType os_lock_;  // The raw OS lock; every method delegates to it.

  DISALLOW_COPY_AND_ASSIGN(LockImpl);
};
| } // namespace internal | |||||
| } // namespace base | |||||
| #endif // BASE_SYNCHRONIZATION_LOCK_IMPL_H_ | |||||
| @ -1,54 +0,0 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/synchronization/lock_impl.h" | |||||
| #include <errno.h> | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| namespace internal { | |||||
// Initializes the pthread mutex.  Debug builds opt into POSIX error-checking
// mutexes so that misuse is reported instead of silently accepted.
LockImpl::LockImpl() {
#ifndef NDEBUG
  // In debug, set up attributes for lock error checking: an ERRORCHECK mutex
  // returns an error code (caught by the DCHECKs here and in the other
  // methods) on misuse such as relocking by the owner or unlocking an
  // unheld mutex.
  pthread_mutexattr_t mta;
  int rv = pthread_mutexattr_init(&mta);
  DCHECK_EQ(rv, 0);
  rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
  DCHECK_EQ(rv, 0);
  rv = pthread_mutex_init(&os_lock_, &mta);
  DCHECK_EQ(rv, 0);
  // The attribute object is only needed during initialization.
  rv = pthread_mutexattr_destroy(&mta);
  DCHECK_EQ(rv, 0);
#else
  // In release, go with the default lock attributes.
  pthread_mutex_init(&os_lock_, NULL);
#endif
}
| LockImpl::~LockImpl() { | |||||
| int rv = pthread_mutex_destroy(&os_lock_); | |||||
| DCHECK_EQ(rv, 0); | |||||
| } | |||||
| bool LockImpl::Try() { | |||||
| int rv = pthread_mutex_trylock(&os_lock_); | |||||
| DCHECK(rv == 0 || rv == EBUSY); | |||||
| return rv == 0; | |||||
| } | |||||
| void LockImpl::Lock() { | |||||
| int rv = pthread_mutex_lock(&os_lock_); | |||||
| DCHECK_EQ(rv, 0); | |||||
| } | |||||
| void LockImpl::Unlock() { | |||||
| int rv = pthread_mutex_unlock(&os_lock_); | |||||
| DCHECK_EQ(rv, 0); | |||||
| } | |||||
| } // namespace internal | |||||
| } // namespace base | |||||
| @ -1,36 +0,0 @@ | |||||
| // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/synchronization/lock_impl.h" | |||||
| namespace base { | |||||
| namespace internal { | |||||
// Initializes the Windows critical section.
LockImpl::LockImpl() {
  // The second parameter is the spin count: for short-held locks it avoids
  // putting a contending thread to sleep right away, which helps performance
  // greatly.
  ::InitializeCriticalSectionAndSpinCount(&os_lock_, 2000);
}
// Releases all resources used by the (now unused) critical section.
LockImpl::~LockImpl() {
  ::DeleteCriticalSection(&os_lock_);
}
| bool LockImpl::Try() { | |||||
| if (::TryEnterCriticalSection(&os_lock_) != FALSE) { | |||||
| return true; | |||||
| } | |||||
| return false; | |||||
| } | |||||
// Blocks until the critical section is entered.
void LockImpl::Lock() {
  ::EnterCriticalSection(&os_lock_);
}
// Leaves the critical section; must only be called by the owning thread.
void LockImpl::Unlock() {
  ::LeaveCriticalSection(&os_lock_);
}
| } // namespace internal | |||||
| } // namespace base | |||||
| @ -1,170 +0,0 @@ | |||||
| /* Copyright (c) 2008-2009, Google Inc. | |||||
| * All rights reserved. | |||||
| * | |||||
| * Redistribution and use in source and binary forms, with or without | |||||
| * modification, are permitted provided that the following conditions are | |||||
| * met: | |||||
| * | |||||
| * * Redistributions of source code must retain the above copyright | |||||
| * notice, this list of conditions and the following disclaimer. | |||||
| * * Neither the name of Google Inc. nor the names of its | |||||
| * contributors may be used to endorse or promote products derived from | |||||
| * this software without specific prior written permission. | |||||
| * | |||||
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||||
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||||
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||||
| * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||||
| * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||||
| * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||||
| * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||||
| * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||||
| * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||||
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||||
| * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||||
| * | |||||
| * --- | |||||
| * Author: Kostya Serebryany | |||||
| */ | |||||
| #ifdef _MSC_VER | |||||
| # include <windows.h> | |||||
| #endif | |||||
| #ifdef __cplusplus | |||||
| # error "This file should be built as pure C to avoid name mangling" | |||||
| #endif | |||||
| #include <stdlib.h> | |||||
| #include <string.h> | |||||
| #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |||||
| #ifdef __GNUC__ | |||||
| /* valgrind.h uses gcc extensions so it won't build with other compilers */ | |||||
| # include "base/third_party/valgrind/valgrind.h" | |||||
| #endif | |||||
| /* Each function is empty and called (via a macro) only in debug mode. | |||||
| The arguments are captured by dynamic tools at runtime. */ | |||||
| #if DYNAMIC_ANNOTATIONS_ENABLED == 1 | |||||
/* Every annotation function below is deliberately empty: at runtime the
   program does nothing.  Dynamic analysis tools (e.g. Valgrind-based race
   detectors) intercept these non-inlinable functions by name and capture
   their arguments, so the bodies must stay empty.  Compiled only when
   DYNAMIC_ANNOTATIONS_ENABLED == 1. */

/* Reader/writer-lock annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
    const char *file, int line, const volatile void *lock){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
    const char *file, int line, const volatile void *lock){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
    const char *file, int line, const volatile void *lock, long is_w){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
    const char *file, int line, const volatile void *lock, long is_w){}

/* Barrier annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
    const char *file, int line, const volatile void *barrier, long count,
    long reinitialization_allowed) {}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
    const char *file, int line, const volatile void *barrier) {}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
    const char *file, int line, const volatile void *barrier) {}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
    const char *file, int line, const volatile void *barrier) {}

/* Condition-variable annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
    const char *file, int line, const volatile void *cv,
    const volatile void *lock){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
    const char *file, int line, const volatile void *cv){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
    const char *file, int line, const volatile void *cv){}

/* Memory publishing annotations (deprecated in the header). */
void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
    const char *file, int line, const volatile void *address, long size){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
    const char *file, int line, const volatile void *address, long size){}

/* Producer-consumer-queue annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
    const char *file, int line, const volatile void *pcq){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
    const char *file, int line, const volatile void *pcq){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
    const char *file, int line, const volatile void *pcq){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
    const char *file, int line, const volatile void *pcq){}

/* Allocator / race-suppression annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
    const char *file, int line, const volatile void *mem, long size){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
    const char *file, int line, const volatile void *mem,
    const char *description){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
    const char *file, int line, const volatile void *mem,
    const char *description){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
    const char *file, int line, const volatile void *mem, long size,
    const char *description){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
    const char *file, int line, const volatile void *mu){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
    const char *file, int line, const volatile void *mu){}

/* Debugging annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
    const char *file, int line, const volatile void *arg){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
    const char *file, int line, const char *name){}

/* Ignore-scope annotations. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
    const char *file, int line){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
    const char *file, int line){}

/* Miscellaneous. */
void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
    const char *file, int line, int enable){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
    const char *file, int line, const volatile void *arg){}
void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
    const char *file, int line){}
| #endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1 */ | |||||
| #if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 | |||||
/* Returns non-zero when the process should be treated as running under
   Valgrind.  Two sources are consulted: the RUNNING_ON_VALGRIND client
   macro from valgrind.h when it was available at compile time, and the
   RUNNING_ON_VALGRIND environment variable, which lets the user force the
   answer (any value other than "0" means "yes"). */
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
  if (RUNNING_ON_VALGRIND)
    return 1;
#endif
#ifndef _MSC_VER
  {
    const char *forced = getenv("RUNNING_ON_VALGRIND");
    if (forced != NULL)
      return strcmp(forced, "0") != 0;
  }
#else
  {
    /* Visual Studio issues warnings if we use getenv, so we use
       GetEnvironmentVariableA instead. */
    char value[100] = "1";
    int res = GetEnvironmentVariableA("RUNNING_ON_VALGRIND",
                                      value, sizeof(value));
    /* value stays "1" if res == 0 or res >= sizeof(value).  The latter can
       only happen for a very long value, which cannot be "0". */
    if (res > 0 && strcmp(value, "0") != 0)
      return 1;
  }
#endif
  return 0;
}
| /* See the comments in dynamic_annotations.h */ | |||||
| int RunningOnValgrind(void) { | |||||
| static volatile int running_on_valgrind = -1; | |||||
| /* C doesn't have thread-safe initialization of statics, and we | |||||
| don't want to depend on pthread_once here, so hack it. */ | |||||
| int local_running_on_valgrind = running_on_valgrind; | |||||
| if (local_running_on_valgrind == -1) | |||||
| running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind(); | |||||
| return local_running_on_valgrind; | |||||
| } | |||||
| #endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */ | |||||
| @ -1,590 +0,0 @@ | |||||
| /* Copyright (c) 2008-2009, Google Inc. | |||||
| * All rights reserved. | |||||
| * | |||||
| * Redistribution and use in source and binary forms, with or without | |||||
| * modification, are permitted provided that the following conditions are | |||||
| * met: | |||||
| * | |||||
| * * Redistributions of source code must retain the above copyright | |||||
| * notice, this list of conditions and the following disclaimer. | |||||
| * * Neither the name of Google Inc. nor the names of its | |||||
| * contributors may be used to endorse or promote products derived from | |||||
| * this software without specific prior written permission. | |||||
| * | |||||
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||||
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||||
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||||
| * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||||
| * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||||
| * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||||
| * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||||
| * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||||
| * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||||
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||||
| * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||||
| * | |||||
| * --- | |||||
| * Author: Kostya Serebryany | |||||
| */ | |||||
/* This file defines dynamic annotations for use with dynamic analysis
   tools such as Valgrind, PIN, etc.
| Dynamic annotation is a source code annotation that affects | |||||
| the generated code (that is, the annotation is not a comment). | |||||
| Each such annotation is attached to a particular | |||||
| instruction and/or to a particular object (address) in the program. | |||||
| The annotations that should be used by users are macros in all upper-case | |||||
| (e.g., ANNOTATE_NEW_MEMORY). | |||||
| Actual implementation of these macros may differ depending on the | |||||
| dynamic analysis tool being used. | |||||
| See http://code.google.com/p/data-race-test/ for more information. | |||||
| This file supports the following dynamic analysis tools: | |||||
| - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero). | |||||
| Macros are defined empty. | |||||
| - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1). | |||||
| Macros are defined as calls to non-inlinable empty functions | |||||
| that are intercepted by Valgrind. */ | |||||
| #ifndef __DYNAMIC_ANNOTATIONS_H__ | |||||
| #define __DYNAMIC_ANNOTATIONS_H__ | |||||
| #ifndef DYNAMIC_ANNOTATIONS_PREFIX | |||||
| # define DYNAMIC_ANNOTATIONS_PREFIX | |||||
| #endif | |||||
| #ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND | |||||
| # define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1 | |||||
| #endif | |||||
| #ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK | |||||
| # ifdef __GNUC__ | |||||
| # define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak)) | |||||
| # else | |||||
| /* TODO(glider): for Windows support we may want to change this macro in order | |||||
| to prepend __declspec(selectany) to the annotations' declarations. */ | |||||
| # error weak annotations are not supported for your compiler | |||||
| # endif | |||||
| #else | |||||
| # define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK | |||||
| #endif | |||||
| /* The following preprocessor magic prepends the value of | |||||
| DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */ | |||||
| #define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B | |||||
| #define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B) | |||||
| #define DYNAMIC_ANNOTATIONS_NAME(name) \ | |||||
| DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name) | |||||
| #ifndef DYNAMIC_ANNOTATIONS_ENABLED | |||||
| # define DYNAMIC_ANNOTATIONS_ENABLED 0 | |||||
| #endif | |||||
| #if DYNAMIC_ANNOTATIONS_ENABLED != 0 | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful when implementing condition variables such as CondVar, | |||||
| using conditional critical sections (Await/LockWhen) and when constructing | |||||
| user-defined synchronization mechanisms. | |||||
| The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can | |||||
| be used to define happens-before arcs in user-defined synchronization | |||||
| mechanisms: the race detector will infer an arc from the former to the | |||||
| latter when they share the same argument pointer. | |||||
| Example 1 (reference counting): | |||||
| void Unref() { | |||||
| ANNOTATE_HAPPENS_BEFORE(&refcount_); | |||||
| if (AtomicDecrementByOne(&refcount_) == 0) { | |||||
| ANNOTATE_HAPPENS_AFTER(&refcount_); | |||||
| delete this; | |||||
| } | |||||
| } | |||||
| Example 2 (message queue): | |||||
| void MyQueue::Put(Type *e) { | |||||
| MutexLock lock(&mu_); | |||||
| ANNOTATE_HAPPENS_BEFORE(e); | |||||
| PutElementIntoMyQueue(e); | |||||
| } | |||||
| Type *MyQueue::Get() { | |||||
| MutexLock lock(&mu_); | |||||
| Type *e = GetElementFromMyQueue(); | |||||
| ANNOTATE_HAPPENS_AFTER(e); | |||||
| return e; | |||||
| } | |||||
| Note: when possible, please use the existing reference counting and message | |||||
| queue implementations instead of inventing new ones. */ | |||||
| /* Report that wait on the condition variable at address "cv" has succeeded | |||||
| and the lock at address "lock" is held. */ | |||||
| #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock) | |||||
| /* Report that wait on the condition variable at "cv" has succeeded. Variant | |||||
| w/o lock. */ | |||||
| #define ANNOTATE_CONDVAR_WAIT(cv) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL) | |||||
| /* Report that we are about to signal on the condition variable at address | |||||
| "cv". */ | |||||
| #define ANNOTATE_CONDVAR_SIGNAL(cv) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv) | |||||
| /* Report that we are about to signal_all on the condition variable at address | |||||
| "cv". */ | |||||
| #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv) | |||||
| /* Annotations for user-defined synchronization mechanisms. */ | |||||
| #define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj) | |||||
| #define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj) | |||||
| /* DEPRECATED. Don't use it. */ | |||||
| #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \ | |||||
| pointer, size) | |||||
| /* DEPRECATED. Don't use it. */ | |||||
| #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \ | |||||
| pointer, size) | |||||
| /* DEPRECATED. Don't use it. */ | |||||
| #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \ | |||||
| do { \ | |||||
| ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \ | |||||
| ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \ | |||||
| } while (0) | |||||
| /* Instruct the tool to create a happens-before arc between mu->Unlock() and | |||||
| mu->Lock(). This annotation may slow down the race detector and hide real | |||||
| races. Normally it is used only when it would be difficult to annotate each | |||||
| of the mutex's critical sections individually using the annotations above. | |||||
| This annotation makes sense only for hybrid race detectors. For pure | |||||
| happens-before detectors this is a no-op. For more details see | |||||
| http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */ | |||||
| #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \ | |||||
| mu) | |||||
| /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. | |||||
| Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in | |||||
| pure happens-before mode. For a hybrid mode this is a no-op. */ | |||||
| #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu) | |||||
| /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */ | |||||
| #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \ | |||||
| mu) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful when defining memory allocators, or when memory that | |||||
| was protected in one way starts to be protected in another. */ | |||||
/* Report that a new memory at "address" of size "size" has been allocated.
   This might be used when the memory has been retrieved from a free list and
   is about to be reused, or when the locking discipline for a variable
   changes. */
| #define ANNOTATE_NEW_MEMORY(address, size) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \ | |||||
| size) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful when defining FIFO queues that transfer data between | |||||
| threads. */ | |||||
| /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at | |||||
| address "pcq" has been created. The ANNOTATE_PCQ_* annotations | |||||
| should be used only for FIFO queues. For non-FIFO queues use | |||||
| ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */ | |||||
| #define ANNOTATE_PCQ_CREATE(pcq) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq) | |||||
| /* Report that the queue at address "pcq" is about to be destroyed. */ | |||||
| #define ANNOTATE_PCQ_DESTROY(pcq) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq) | |||||
| /* Report that we are about to put an element into a FIFO queue at address | |||||
| "pcq". */ | |||||
| #define ANNOTATE_PCQ_PUT(pcq) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq) | |||||
| /* Report that we've just got an element from a FIFO queue at address | |||||
| "pcq". */ | |||||
| #define ANNOTATE_PCQ_GET(pcq) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations that suppress errors. It is usually better to express the | |||||
| program's synchronization using the other annotations, but these can | |||||
| be used when all else fails. */ | |||||
| /* Report that we may have a benign race at "pointer", with size | |||||
| "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the | |||||
| point where "pointer" has been allocated, preferably close to the point | |||||
| where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */ | |||||
| #define ANNOTATE_BENIGN_RACE(pointer, description) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \ | |||||
| pointer, sizeof(*(pointer)), description) | |||||
| /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to | |||||
| the memory range [address, address+size). */ | |||||
| #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \ | |||||
| address, size, description) | |||||
| /* Request the analysis tool to ignore all reads in the current thread | |||||
| until ANNOTATE_IGNORE_READS_END is called. | |||||
| Useful to ignore intentional racey reads, while still checking | |||||
| other reads and all writes. | |||||
| See also ANNOTATE_UNPROTECTED_READ. */ | |||||
| #define ANNOTATE_IGNORE_READS_BEGIN() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) | |||||
| /* Stop ignoring reads. */ | |||||
| #define ANNOTATE_IGNORE_READS_END() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) | |||||
| /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */ | |||||
| #define ANNOTATE_IGNORE_WRITES_BEGIN() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) | |||||
| /* Stop ignoring writes. */ | |||||
| #define ANNOTATE_IGNORE_WRITES_END() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) | |||||
| /* Start ignoring all memory accesses (reads and writes). */ | |||||
| #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ | |||||
| do {\ | |||||
| ANNOTATE_IGNORE_READS_BEGIN();\ | |||||
| ANNOTATE_IGNORE_WRITES_BEGIN();\ | |||||
| }while(0)\ | |||||
| /* Stop ignoring all memory accesses. */ | |||||
| #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ | |||||
| do {\ | |||||
| ANNOTATE_IGNORE_WRITES_END();\ | |||||
| ANNOTATE_IGNORE_READS_END();\ | |||||
| }while(0)\ | |||||
| /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events: | |||||
| RWLOCK* and CONDVAR*. */ | |||||
| #define ANNOTATE_IGNORE_SYNC_BEGIN() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__) | |||||
| /* Stop ignoring sync events. */ | |||||
| #define ANNOTATE_IGNORE_SYNC_END() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__) | |||||
| /* Enable (enable!=0) or disable (enable==0) race detection for all threads. | |||||
| This annotation could be useful if you want to skip expensive race analysis | |||||
| during some period of program execution, e.g. during initialization. */ | |||||
| #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \ | |||||
| enable) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful for debugging. */ | |||||
| /* Request to trace every access to "address". */ | |||||
| #define ANNOTATE_TRACE_MEMORY(address) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address) | |||||
| /* Report the current thread name to a race detector. */ | |||||
| #define ANNOTATE_THREAD_NAME(name) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful when implementing locks. They are not | |||||
| normally needed by modules that merely use locks. | |||||
| The "lock" argument is a pointer to the lock object. */ | |||||
| /* Report that a lock has been created at address "lock". */ | |||||
| #define ANNOTATE_RWLOCK_CREATE(lock) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) | |||||
| /* Report that the lock at address "lock" is about to be destroyed. */ | |||||
| #define ANNOTATE_RWLOCK_DESTROY(lock) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) | |||||
| /* Report that the lock at address "lock" has been acquired. | |||||
| is_w=1 for writer lock, is_w=0 for reader lock. */ | |||||
| #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \ | |||||
| is_w) | |||||
| /* Report that the lock at address "lock" is about to be released. */ | |||||
| #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \ | |||||
| is_w) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful when implementing barriers. They are not | |||||
| normally needed by modules that merely use barriers. | |||||
| The "barrier" argument is a pointer to the barrier object. */ | |||||
| /* Report that the "barrier" has been initialized with initial "count". | |||||
| If 'reinitialization_allowed' is true, initialization is allowed to happen | |||||
| multiple times w/o calling barrier_destroy() */ | |||||
| #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \ | |||||
| count, reinitialization_allowed) | |||||
| /* Report that we are about to enter barrier_wait("barrier"). */ | |||||
| #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \ | |||||
| barrier) | |||||
| /* Report that we just exited barrier_wait("barrier"). */ | |||||
| #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \ | |||||
| barrier) | |||||
| /* Report that the "barrier" has been destroyed. */ | |||||
| #define ANNOTATE_BARRIER_DESTROY(barrier) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \ | |||||
| barrier) | |||||
| /* ------------------------------------------------------------- | |||||
| Annotations useful for testing race detectors. */ | |||||
| /* Report that we expect a race on the variable at "address". | |||||
| Use only in unit tests for a race detector. */ | |||||
| #define ANNOTATE_EXPECT_RACE(address, description) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \ | |||||
| description) | |||||
| #define ANNOTATE_FLUSH_EXPECTED_RACES() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__) | |||||
| /* A no-op. Insert where you like to test the interceptors. */ | |||||
| #define ANNOTATE_NO_OP(arg) \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg) | |||||
| /* Force the race detector to flush its state. The actual effect depends on | |||||
| * the implementation of the detector. */ | |||||
| #define ANNOTATE_FLUSH_STATE() \ | |||||
| DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__) | |||||
| #else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ | |||||
| #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */ | |||||
| #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ | |||||
| #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ | |||||
| #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ | |||||
| #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */ | |||||
| #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */ | |||||
| #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */ | |||||
| #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */ | |||||
| #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */ | |||||
| #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */ | |||||
| #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */ | |||||
| #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */ | |||||
| #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */ | |||||
| #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */ | |||||
| #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */ | |||||
| #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */ | |||||
| #define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */ | |||||
| #define ANNOTATE_PCQ_CREATE(pcq) /* empty */ | |||||
| #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */ | |||||
| #define ANNOTATE_PCQ_PUT(pcq) /* empty */ | |||||
| #define ANNOTATE_PCQ_GET(pcq) /* empty */ | |||||
| #define ANNOTATE_NEW_MEMORY(address, size) /* empty */ | |||||
| #define ANNOTATE_EXPECT_RACE(address, description) /* empty */ | |||||
/* Takes no arguments, matching the DYNAMIC_ANNOTATIONS_ENABLED definition
   of this macro, so call sites compile identically in both builds. */
#define ANNOTATE_FLUSH_EXPECTED_RACES() /* empty */
| #define ANNOTATE_BENIGN_RACE(address, description) /* empty */ | |||||
| #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ | |||||
| #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */ | |||||
| #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */ | |||||
| #define ANNOTATE_TRACE_MEMORY(arg) /* empty */ | |||||
| #define ANNOTATE_THREAD_NAME(name) /* empty */ | |||||
| #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */ | |||||
| #define ANNOTATE_IGNORE_READS_END() /* empty */ | |||||
| #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ | |||||
| #define ANNOTATE_IGNORE_WRITES_END() /* empty */ | |||||
| #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ | |||||
| #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ | |||||
| #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */ | |||||
| #define ANNOTATE_IGNORE_SYNC_END() /* empty */ | |||||
| #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ | |||||
| #define ANNOTATE_NO_OP(arg) /* empty */ | |||||
| #define ANNOTATE_FLUSH_STATE() /* empty */ | |||||
| #endif /* DYNAMIC_ANNOTATIONS_ENABLED */ | |||||
| /* Use the macros above rather than using these functions directly. */ | |||||
| #ifdef __cplusplus | |||||
| extern "C" { | |||||
| #endif | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)( | |||||
| const char *file, int line, | |||||
| const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)( | |||||
| const char *file, int line, | |||||
| const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)( | |||||
| const char *file, int line, | |||||
| const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)( | |||||
| const char *file, int line, | |||||
| const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)( | |||||
| const char *file, int line, const volatile void *barrier, long count, | |||||
| long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)( | |||||
| const char *file, int line, | |||||
| const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)( | |||||
| const char *file, int line, | |||||
| const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)( | |||||
| const char *file, int line, | |||||
| const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)( | |||||
| const char *file, int line, const volatile void *cv, | |||||
| const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)( | |||||
| const char *file, int line, | |||||
| const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)( | |||||
| const char *file, int line, | |||||
| const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)( | |||||
| const char *file, int line, | |||||
| const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)( | |||||
| const char *file, int line, | |||||
| const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)( | |||||
| const char *file, int line, | |||||
| const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)( | |||||
| const char *file, int line, | |||||
| const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)( | |||||
| const char *file, int line, | |||||
| const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)( | |||||
| const char *file, int line, | |||||
| const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)( | |||||
| const char *file, int line, | |||||
| const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)( | |||||
| const char *file, int line, const volatile void *mem, | |||||
| const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)( | |||||
| const char *file, int line, const volatile void *mem, | |||||
| const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)( | |||||
| const char *file, int line, const volatile void *mem, long size, | |||||
| const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)( | |||||
| const char *file, int line, | |||||
| const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)( | |||||
| const char *file, int line, | |||||
| const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)( | |||||
| const char *file, int line, | |||||
| const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)( | |||||
| const char *file, int line, | |||||
| const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)( | |||||
| const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)( | |||||
| const char *file, int line, | |||||
| const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)( | |||||
| const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; | |||||
| #if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 | |||||
| /* Return non-zero value if running under valgrind. | |||||
| If "valgrind.h" is included into dynamic_annotations.c, | |||||
| the regular valgrind mechanism will be used. | |||||
| See http://valgrind.org/docs/manual/manual-core-adv.html about | |||||
| RUNNING_ON_VALGRIND and other valgrind "client requests". | |||||
| The file "valgrind.h" may be obtained by doing | |||||
| svn co svn://svn.valgrind.org/valgrind/trunk/include | |||||
| If for some reason you can't use "valgrind.h" or want to fake valgrind, | |||||
| there are two ways to make this function return non-zero: | |||||
| - Use environment variable: export RUNNING_ON_VALGRIND=1 | |||||
| - Make your tool intercept the function RunningOnValgrind() and | |||||
| change its return value. | |||||
| */ | |||||
| int RunningOnValgrind(void); | |||||
| #endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */ | |||||
| #ifdef __cplusplus | |||||
| } | |||||
| #endif | |||||
| #if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) | |||||
| /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. | |||||
| Instead of doing | |||||
| ANNOTATE_IGNORE_READS_BEGIN(); | |||||
| ... = x; | |||||
| ANNOTATE_IGNORE_READS_END(); | |||||
| one can use | |||||
| ... = ANNOTATE_UNPROTECTED_READ(x); */ | |||||
/* Reads "x" while reads are ignored by the race detector and returns the
   copied value, so a deliberately unsynchronized read yields no report. */
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;  /* The only access the detector is asked to ignore. */
ANNOTATE_IGNORE_READS_END();
return res;
}
| /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ | |||||
| #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ | |||||
| namespace { \ | |||||
| class static_var ## _annotator { \ | |||||
| public: \ | |||||
| static_var ## _annotator() { \ | |||||
| ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ | |||||
| sizeof(static_var), \ | |||||
| # static_var ": " description); \ | |||||
| } \ | |||||
| }; \ | |||||
| static static_var ## _annotator the ## static_var ## _annotator;\ | |||||
| } | |||||
| #else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ | |||||
| #define ANNOTATE_UNPROTECTED_READ(x) (x) | |||||
| #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ | |||||
| #endif /* DYNAMIC_ANNOTATIONS_ENABLED */ | |||||
| #endif /* __DYNAMIC_ANNOTATIONS_H__ */ | |||||
| @ -1,99 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // WARNING: You should *NOT* be using this class directly. PlatformThread is | |||||
| // the low-level platform-specific abstraction to the OS's threading interface. | |||||
| // You should instead be using a message-loop driven Thread, see thread.h. | |||||
| #ifndef BASE_THREADING_PLATFORM_THREAD_H_ | |||||
| #define BASE_THREADING_PLATFORM_THREAD_H_ | |||||
| #pragma once | |||||
| #include "base/basictypes.h" | |||||
| #include "build/build_config.h" | |||||
| #if defined(OS_WIN) | |||||
| #include <windows.h> | |||||
| #elif defined(OS_POSIX) | |||||
| #include <pthread.h> | |||||
| #if defined(OS_MACOSX) | |||||
| #include <mach/mach.h> | |||||
| #else // OS_POSIX && !OS_MACOSX | |||||
| #include <unistd.h> | |||||
| #endif | |||||
| #endif | |||||
| namespace base { | |||||
| // PlatformThreadHandle should not be assumed to be a numeric type, since the | |||||
| // standard intends to allow pthread_t to be a structure. This means you | |||||
| // should not initialize it to a value, like 0. If it's a member variable, the | |||||
| // constructor can safely "value initialize" using () in the initializer list. | |||||
| #if defined(OS_WIN) | |||||
| typedef DWORD PlatformThreadId; | |||||
| typedef void* PlatformThreadHandle; // HANDLE | |||||
| const PlatformThreadHandle kNullThreadHandle = NULL; | |||||
| #elif defined(OS_POSIX) | |||||
| typedef pthread_t PlatformThreadHandle; | |||||
| const PlatformThreadHandle kNullThreadHandle = 0; | |||||
| #if defined(OS_MACOSX) | |||||
| typedef mach_port_t PlatformThreadId; | |||||
| #else // OS_POSIX && !OS_MACOSX | |||||
| typedef pid_t PlatformThreadId; | |||||
| #endif | |||||
| #endif | |||||
| const PlatformThreadId kInvalidThreadId = 0; | |||||
// A namespace for low-level thread functions.
// Holds only static functions; instances cannot be created (see
// DISALLOW_IMPLICIT_CONSTRUCTORS below).
class PlatformThread {
public:
// Implement this interface to run code on a background thread. Your
// ThreadMain method will be called on the newly created thread.
class Delegate {
public:
virtual ~Delegate() {}
// Entry point executed on the new thread.
virtual void ThreadMain() = 0;
};
// Gets the current thread id, which may be useful for logging purposes.
static PlatformThreadId CurrentId();
// Yield the current thread so another thread can be scheduled.
static void YieldCurrentThread();
// Sleeps for the specified duration (units are milliseconds).
static void Sleep(int duration_ms);
// Sets the thread name visible to a debugger. This has no effect otherwise.
static void SetName(const char* name);
// Creates a new thread. The |stack_size| parameter can be 0 to indicate
// that the default stack size should be used. Upon success,
// |*thread_handle| will be assigned a handle to the newly created thread,
// and |delegate|'s ThreadMain method will be executed on the newly created
// thread.
// NOTE: When you are done with the thread handle, you must call Join to
// release system resources associated with the thread. You must ensure that
// the Delegate object outlives the thread.
static bool Create(size_t stack_size, Delegate* delegate,
PlatformThreadHandle* thread_handle);
// CreateNonJoinable() does the same thing as Create() except the thread
// cannot be Join()'d. Therefore, it also does not output a
// PlatformThreadHandle.
static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
// Joins with a thread created via the Create function. This function blocks
// the caller until the designated thread exits. This will invalidate
// |thread_handle|.
static void Join(PlatformThreadHandle thread_handle);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
};
| } // namespace base | |||||
| #endif // BASE_THREADING_PLATFORM_THREAD_H_ | |||||
| @ -1,54 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/platform_thread.h" | |||||
| #import <Foundation/Foundation.h> | |||||
| #include <dlfcn.h> | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| // If Cocoa is to be used on more than one thread, it must know that the | |||||
| // application is multithreaded. Since it's possible to enter Cocoa code | |||||
| // from threads created by pthread_thread_create, Cocoa won't necessarily | |||||
| // be aware that the application is multithreaded. Spawning an NSThread is | |||||
| // enough to get Cocoa to set up for multithreaded operation, so this is done | |||||
| // if necessary before pthread_thread_create spawns any threads. | |||||
| // | |||||
| // http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html | |||||
void InitThreading() {
// The static caches the first answer; once a thread has been detached the
// process stays multithreaded, so later calls can skip the work entirely.
static BOOL multithreaded = [NSThread isMultiThreaded];
if (!multithreaded) {
// +[NSObject class] is idempotent.
// Detaching any NSThread is what flips Cocoa into multithreaded mode.
[NSThread detachNewThreadSelector:@selector(class)
toTarget:[NSObject class]
withObject:nil];
multithreaded = YES;
DCHECK([NSThread isMultiThreaded]);
}
}
// static
void PlatformThread::SetName(const char* name) {
// pthread_setname_np is only available in 10.6 or later, so test
// for it at runtime.
int (*dynamic_pthread_setname_np)(const char*);
// Store dlsym()'s void* result through a void** alias of the function
// pointer, avoiding a direct object-to-function pointer cast.
*reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
dlsym(RTLD_DEFAULT, "pthread_setname_np");
if (!dynamic_pthread_setname_np)
return;
// Mac OS X does not expose the length limit of the name, so
// hardcode it.
const int kMaxNameLength = 63;
std::string shortened_name = std::string(name).substr(0, kMaxNameLength);
// pthread_setname() fails (harmlessly) in the sandbox, ignore when it does.
// See http://crbug.com/47058
dynamic_pthread_setname_np(shortened_name.c_str());
}
| } // namespace base | |||||
| @ -1,225 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/platform_thread.h" | |||||
| #include <errno.h> | |||||
| #include <sched.h> | |||||
| #include "base/logging.h" | |||||
| #include "base/safe_strerror_posix.h" | |||||
| #include "base/scoped_ptr.h" | |||||
| #include "base/threading/thread_restrictions.h" | |||||
| #if defined(OS_MACOSX) | |||||
| #include <mach/mach.h> | |||||
| #include <sys/resource.h> | |||||
| #include <algorithm> | |||||
| #endif | |||||
| #if defined(OS_LINUX) | |||||
| #include <dlfcn.h> | |||||
| #include <sys/prctl.h> | |||||
| #include <sys/syscall.h> | |||||
| #include <unistd.h> | |||||
| #endif | |||||
| #if defined(OS_NACL) | |||||
| #include <sys/nacl_syscalls.h> | |||||
| #endif | |||||
| namespace base { | |||||
| #if defined(OS_MACOSX) | |||||
| void InitThreading(); | |||||
| #endif | |||||
| namespace { | |||||
// Heap-allocated bundle handed from CreateThread() to ThreadFunc(), which
// takes ownership and deletes it.
struct ThreadParams {
PlatformThread::Delegate* delegate; // Runs ThreadMain() on the new thread.
bool joinable; // False for detached (non-joinable) threads.
};
| void* ThreadFunc(void* params) { | |||||
| ThreadParams* thread_params = static_cast<ThreadParams*>(params); | |||||
| PlatformThread::Delegate* delegate = thread_params->delegate; | |||||
| if (!thread_params->joinable) | |||||
| base::ThreadRestrictions::SetSingletonAllowed(false); | |||||
| delete thread_params; | |||||
| delegate->ThreadMain(); | |||||
| return NULL; | |||||
| } | |||||
// Shared worker for Create() and CreateNonJoinable(): configures pthread
// attributes (detach state, stack size) and starts ThreadFunc() with a
// heap-allocated ThreadParams owned by the new thread on success.
bool CreateThread(size_t stack_size, bool joinable,
PlatformThread::Delegate* delegate,
PlatformThreadHandle* thread_handle) {
#if defined(OS_MACOSX)
base::InitThreading();
#endif // OS_MACOSX
bool success = false;
pthread_attr_t attributes;
pthread_attr_init(&attributes);
// Pthreads are joinable by default, so only specify the detached attribute if
// the thread should be non-joinable.
if (!joinable) {
pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
}
#if defined(OS_MACOSX)
// The Mac OS X default for a pthread stack size is 512kB.
// Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
// DEFAULT_STACK_SIZE for this purpose.
//
// 512kB isn't quite generous enough for some deeply recursive threads that
// otherwise request the default stack size by specifying 0. Here, adopt
// glibc's behavior as on Linux, which is to use the current stack size
// limit (ulimit -s) as the default stack size. See
// glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
// avoid setting the limit below the Mac OS X default or the minimum usable
// stack size, these values are also considered. If any of these values
// can't be determined, or if stack size is unlimited (ulimit -s unlimited),
// stack_size is left at 0 to get the system default.
//
// Mac OS X normally only applies ulimit -s to the main thread stack. On
// contemporary OS X and Linux systems alike, this value is generally 8MB
// or in that neighborhood.
if (stack_size == 0) {
size_t default_stack_size;
struct rlimit stack_rlimit;
if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
stack_rlimit.rlim_cur != RLIM_INFINITY) {
stack_size = std::max(std::max(default_stack_size,
static_cast<size_t>(PTHREAD_STACK_MIN)),
static_cast<size_t>(stack_rlimit.rlim_cur));
}
}
#endif // OS_MACOSX
if (stack_size > 0)
pthread_attr_setstacksize(&attributes, stack_size);
// Ownership of |params| passes to ThreadFunc() if pthread_create succeeds.
ThreadParams* params = new ThreadParams;
params->delegate = delegate;
params->joinable = joinable;
success = !pthread_create(thread_handle, &attributes, ThreadFunc, params);
pthread_attr_destroy(&attributes);
// On failure the new thread never ran, so reclaim |params| here.
if (!success)
delete params;
return success;
}
| } // namespace | |||||
// static
PlatformThreadId PlatformThread::CurrentId() {
// Pthreads doesn't have the concept of a thread ID, so we have to reach down
// into the kernel.
#if defined(OS_MACOSX)
// NOTE(review): mach_thread_self() returns a send right that is normally
// released with mach_port_deallocate(); confirm the unreleased reference
// here is acceptable for callers that invoke this frequently.
return mach_thread_self();
#elif defined(OS_LINUX)
return syscall(__NR_gettid);
#elif defined(OS_FREEBSD)
// TODO(BSD): find a better thread ID
return reinterpret_cast<int64>(pthread_self());
#elif defined(OS_NACL)
return pthread_self();
#endif
}
// static
void PlatformThread::YieldCurrentThread() {
// Ask the POSIX scheduler to run another ready thread, if any.
sched_yield();
}
| // static | |||||
| void PlatformThread::Sleep(int duration_ms) { | |||||
| struct timespec sleep_time, remaining; | |||||
| // Contains the portion of duration_ms >= 1 sec. | |||||
| sleep_time.tv_sec = duration_ms / 1000; | |||||
| duration_ms -= sleep_time.tv_sec * 1000; | |||||
| // Contains the portion of duration_ms < 1 sec. | |||||
| sleep_time.tv_nsec = duration_ms * 1000 * 1000; // nanoseconds. | |||||
| while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR) | |||||
| sleep_time = remaining; | |||||
| } | |||||
// Linux SetName is currently disabled, as we need to distinguish between
// helper threads (where it's ok to make this call) and the main thread
// (where making this call renames our process, causing tools like killall
// to stop working).
#if 0 && defined(OS_LINUX)
// static
void PlatformThread::SetName(const char* name) {
// http://0pointer.de/blog/projects/name-your-threads.html
// glibc recently added support for pthread_setname_np, but it's not
// commonly available yet. So test for it at runtime.
int (*dynamic_pthread_setname_np)(pthread_t, const char*);
*reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
dlsym(RTLD_DEFAULT, "pthread_setname_np");
if (dynamic_pthread_setname_np) {
// This limit comes from glibc, which gets it from the kernel
// (TASK_COMM_LEN).
const int kMaxNameLength = 15;
std::string shortened_name = std::string(name).substr(0, kMaxNameLength);
int err = dynamic_pthread_setname_np(pthread_self(),
shortened_name.c_str());
// NOTE(review): glibc's pthread_setname_np reports failure with a
// positive errno value, so this "< 0" check looks like it can never
// fire -- confirm (likely "err != 0") before re-enabling this block.
if (err < 0)
LOG(ERROR) << "pthread_setname_np: " << safe_strerror(err);
} else {
// Implementing this function without glibc is simple enough. (We
// don't do the name length clipping as above because it will be
// truncated by the callee (see TASK_COMM_LEN above).)
int err = prctl(PR_SET_NAME, name);
if (err < 0)
PLOG(ERROR) << "prctl(PR_SET_NAME)";
}
}
#elif defined(OS_MACOSX)
// Mac is implemented in platform_thread_mac.mm.
#else
// static
void PlatformThread::SetName(const char* /*name*/) {
// Leave it unimplemented.
// (This should be relatively simple to implement for the BSDs; I
// just don't have one handy to test the code on.)
}
#endif // defined(OS_LINUX)
| // static | |||||
| bool PlatformThread::Create(size_t stack_size, Delegate* delegate, | |||||
| PlatformThreadHandle* thread_handle) { | |||||
| return CreateThread(stack_size, true /* joinable thread */, | |||||
| delegate, thread_handle); | |||||
| } | |||||
| // static | |||||
| bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) { | |||||
| PlatformThreadHandle unused; | |||||
| bool result = CreateThread(stack_size, false /* non-joinable thread */, | |||||
| delegate, &unused); | |||||
| return result; | |||||
| } | |||||
// static
void PlatformThread::Join(PlatformThreadHandle thread_handle) {
// Joining another thread may block the current thread for a long time, since
// the thread referred to by |thread_handle| may still be running long-lived /
// blocking tasks.
base::ThreadRestrictions::AssertIOAllowed();
// The joined thread's exit value is deliberately discarded (NULL retval).
pthread_join(thread_handle, NULL);
}
| } // namespace base | |||||
| @ -1,147 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/platform_thread.h" | |||||
| #include "base/logging.h" | |||||
| #include "base/threading/thread_restrictions.h" | |||||
| #include "base/win/windows_version.h" | |||||
| namespace base { | |||||
| namespace { | |||||
// The information on how to set the thread name comes from
// a MSDN article: http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
const DWORD kVCThreadNameException = 0x406D1388;
typedef struct tagTHREADNAME_INFO {
DWORD dwType; // Must be 0x1000.
LPCSTR szName; // Pointer to name (in user addr space).
DWORD dwThreadID; // Thread ID (-1=caller thread).
DWORD dwFlags; // Reserved for future use, must be zero.
} THREADNAME_INFO;
// Heap-allocated bundle handed from CreateThreadInternal() to ThreadFunc(),
// which takes ownership and deletes it.
struct ThreadParams {
PlatformThread::Delegate* delegate; // Runs ThreadMain() on the new thread.
bool joinable; // False when no output handle was requested.
};
| DWORD __stdcall ThreadFunc(void* params) { | |||||
| ThreadParams* thread_params = static_cast<ThreadParams*>(params); | |||||
| PlatformThread::Delegate* delegate = thread_params->delegate; | |||||
| if (!thread_params->joinable) | |||||
| base::ThreadRestrictions::SetSingletonAllowed(false); | |||||
| delete thread_params; | |||||
| delegate->ThreadMain(); | |||||
| return NULL; | |||||
| } | |||||
// CreateThreadInternal() matches PlatformThread::Create(), except that
// |out_thread_handle| may be NULL, in which case a non-joinable thread is
// created.
bool CreateThreadInternal(size_t stack_size,
                          PlatformThread::Delegate* delegate,
                          PlatformThreadHandle* out_thread_handle) {
  PlatformThreadHandle thread_handle;
  unsigned int flags = 0;
  if (stack_size > 0 && base::win::GetVersion() >= base::win::VERSION_XP) {
    // Treat |stack_size| as a reservation rather than a commit; the flag is
    // only honored on XP and later, hence the version check.
    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
  } else {
    stack_size = 0;  // Fall back to the default stack size.
  }

  // Ownership of |params| passes to ThreadFunc() on success; deleted below
  // on failure.
  ThreadParams* params = new ThreadParams;
  params->delegate = delegate;
  params->joinable = out_thread_handle != NULL;

  // Using CreateThread here vs _beginthreadex makes thread creation a bit
  // faster and doesn't require the loader lock to be available.  Our code will
  // have to work running on CreateThread() threads anyway, since we run code
  // on the Windows thread pool, etc.  For some background on the difference:
  //   http://www.microsoft.com/msj/1099/win32/win321099.aspx
  thread_handle = CreateThread(
      NULL, stack_size, ThreadFunc, params, flags, NULL);
  if (!thread_handle) {
    delete params;
    return false;
  }

  if (out_thread_handle)
    *out_thread_handle = thread_handle;
  else
    CloseHandle(thread_handle);  // Non-joinable: nobody will wait on it.
  return true;
}
| } // namespace | |||||
// static
PlatformThreadId PlatformThread::CurrentId() {
  // The OS thread id is used directly as the platform thread id.
  return GetCurrentThreadId();
}
// static
void PlatformThread::YieldCurrentThread() {
  // Sleep(0) relinquishes the remainder of this thread's time slice to any
  // other ready thread.
  ::Sleep(0);
}
// static
void PlatformThread::Sleep(int duration_ms) {
  // Suspends the calling thread for at least |duration_ms| milliseconds.
  ::Sleep(duration_ms);
}
// static
void PlatformThread::SetName(const char* name) {
  // The debugger needs to be around to catch the name in the exception.  If
  // there isn't a debugger, we are just needlessly throwing an exception.
  if (!::IsDebuggerPresent())
    return;

  // Fill in the magic structure the Visual Studio debugger looks for.
  THREADNAME_INFO info;
  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = CurrentId();
  info.dwFlags = 0;

  // Raise the well-known exception; an attached debugger consumes it to set
  // the thread name, and the __except clause resumes execution so it is
  // otherwise a no-op.
  __try {
    RaiseException(kVCThreadNameException, 0, sizeof(info)/sizeof(DWORD),
                   reinterpret_cast<DWORD_PTR*>(&info));
  } __except(EXCEPTION_CONTINUE_EXECUTION) {
  }
}
// static
bool PlatformThread::Create(size_t stack_size, Delegate* delegate,
                            PlatformThreadHandle* thread_handle) {
  // Callers who don't want a handle should use CreateNonJoinable() instead.
  DCHECK(thread_handle);
  return CreateThreadInternal(stack_size, delegate, thread_handle);
}
// static
bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
  // Passing a NULL handle slot makes CreateThreadInternal() close the handle,
  // producing a fire-and-forget thread.
  return CreateThreadInternal(stack_size, delegate, NULL);
}
// static
void PlatformThread::Join(PlatformThreadHandle thread_handle) {
  DCHECK(thread_handle);
  // TODO(willchan): Enable this check once I can get it to work for Windows
  // shutdown.
  // Joining another thread may block the current thread for a long time, since
  // the thread referred to by |thread_handle| may still be running long-lived /
  // blocking tasks.
#if 0
  base::ThreadRestrictions::AssertIOAllowed();
#endif

  // Wait for the thread to exit.  It should already have terminated but make
  // sure this assumption is valid.
  DWORD result = WaitForSingleObject(thread_handle, INFINITE);
  DCHECK_EQ(WAIT_OBJECT_0, result);

  // The handle is consumed here; it must not be used after Join().
  CloseHandle(thread_handle);
}
| } // namespace base | |||||
| @ -1,127 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| // WARNING: Thread local storage is a bit tricky to get right. Please make | |||||
| // sure that this is really the proper solution for what you're trying to | |||||
| // achieve. Don't prematurely optimize, most likely you can just use a Lock. | |||||
| // | |||||
| // These classes implement a wrapper around the platform's TLS storage | |||||
| // mechanism. On construction, they will allocate a TLS slot, and free the | |||||
| // TLS slot on destruction. No memory management (creation or destruction) is | |||||
| // handled. This means for uses of ThreadLocalPointer, you must correctly | |||||
| // manage the memory yourself, these classes will not destroy the pointer for | |||||
| // you. There are no at-thread-exit actions taken by these classes. | |||||
| // | |||||
| // ThreadLocalPointer<Type> wraps a Type*. It performs no creation or | |||||
| // destruction, so memory management must be handled elsewhere. The first call | |||||
| // to Get() on a thread will return NULL. You can update the pointer with a | |||||
| // call to Set(). | |||||
| // | |||||
// ThreadLocalBoolean wraps a bool. It will default to false until it is
// set to another value with Set().
| // | |||||
| // Thread Safety: An instance of ThreadLocalStorage is completely thread safe | |||||
| // once it has been created. If you want to dynamically create an instance, | |||||
| // you must of course properly deal with safety and race conditions. This | |||||
// means a function-level static initializer is generally inappropriate.
| // | |||||
| // Example usage: | |||||
| // // My class is logically attached to a single thread. We cache a pointer | |||||
| // // on the thread it was created on, so we can implement current(). | |||||
| // MyClass::MyClass() { | |||||
| // DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL); | |||||
| // Singleton<ThreadLocalPointer<MyClass> >::get()->Set(this); | |||||
| // } | |||||
| // | |||||
| // MyClass::~MyClass() { | |||||
| // DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() != NULL); | |||||
| // Singleton<ThreadLocalPointer<MyClass> >::get()->Set(NULL); | |||||
| // } | |||||
| // | |||||
| // // Return the current MyClass associated with the calling thread, can be | |||||
| // // NULL if there isn't a MyClass associated. | |||||
| // MyClass* MyClass::current() { | |||||
| // return Singleton<ThreadLocalPointer<MyClass> >::get()->Get(); | |||||
| // } | |||||
| #ifndef BASE_THREADING_THREAD_LOCAL_H_ | |||||
| #define BASE_THREADING_THREAD_LOCAL_H_ | |||||
| #pragma once | |||||
| #include "base/basictypes.h" | |||||
| #if defined(OS_POSIX) | |||||
| #include <pthread.h> | |||||
| #endif | |||||
| namespace base { | |||||
| namespace internal { | |||||
// Helper functions that abstract the cross-platform APIs.  Do not use directly.
struct ThreadLocalPlatform {
#if defined(OS_WIN)
  typedef unsigned long SlotType;  // A TLS index from TlsAlloc().
#elif defined(OS_POSIX)
  typedef pthread_key_t SlotType;  // A key from pthread_key_create().
#endif

  // Allocates a fresh TLS slot and stores it in |slot|.  Fatal on failure.
  static void AllocateSlot(SlotType& slot);
  // Releases a slot previously obtained from AllocateSlot().
  static void FreeSlot(SlotType& slot);
  // Returns the calling thread's value for |slot| (NULL if never set).
  static void* GetValueFromSlot(SlotType& slot);
  // Sets the calling thread's value for |slot|.
  static void SetValueInSlot(SlotType& slot, void* value);
};
| } // namespace internal | |||||
// Wraps a per-thread Type*.  Performs no memory management: Get() returns
// whatever the calling thread last passed to Set() (NULL initially), and the
// destructor only frees the TLS slot, never the stored pointers.
template <typename Type>
class ThreadLocalPointer {
 public:
  ThreadLocalPointer() : slot_() {
    internal::ThreadLocalPlatform::AllocateSlot(slot_);
  }

  ~ThreadLocalPointer() {
    internal::ThreadLocalPlatform::FreeSlot(slot_);
  }

  // Returns the calling thread's pointer, or NULL if Set() was never called
  // on this thread.
  Type* Get() {
    return static_cast<Type*>(
        internal::ThreadLocalPlatform::GetValueFromSlot(slot_));
  }

  // Sets the calling thread's pointer; other threads are unaffected.
  void Set(Type* ptr) {
    internal::ThreadLocalPlatform::SetValueInSlot(slot_, ptr);
  }

 private:
  typedef internal::ThreadLocalPlatform::SlotType SlotType;

  SlotType slot_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
};
// Wraps a per-thread bool that defaults to false, implemented on top of
// ThreadLocalPointer by storing a non-NULL pointer for true.
class ThreadLocalBoolean {
 public:
  ThreadLocalBoolean() { }
  ~ThreadLocalBoolean() { }

  // Returns this thread's value; false if Set() was never called here.
  bool Get() {
    return tlp_.Get() != NULL;
  }

  void Set(bool val) {
    tlp_.Set(reinterpret_cast<void*>(val ? 1 : 0));
  }

 private:
  ThreadLocalPointer<void> tlp_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
};
| } // namespace base | |||||
| #endif // BASE_THREADING_THREAD_LOCAL_H_ | |||||
| @ -1,40 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/thread_local.h" | |||||
| #include <pthread.h> | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| namespace internal { | |||||
// static
void ThreadLocalPlatform::AllocateSlot(SlotType& slot) {
  // No destructor function is registered: value lifetimes are managed by the
  // caller (see thread_local.h).
  int error = pthread_key_create(&slot, NULL);
  CHECK_EQ(error, 0);
}
| // static | |||||
| void ThreadLocalPlatform::FreeSlot(SlotType& slot) { | |||||
| int error = pthread_key_delete(slot); | |||||
| DCHECK(error == 0); | |||||
| } | |||||
// static
void* ThreadLocalPlatform::GetValueFromSlot(SlotType& slot) {
  // Returns NULL for threads that never called SetValueInSlot().
  return pthread_getspecific(slot);
}
// static
void ThreadLocalPlatform::SetValueInSlot(SlotType& slot, void* value) {
  // Stores |value| in |slot| for the calling thread only.
  int error = pthread_setspecific(slot, value);
  CHECK_EQ(error, 0);
}
| } // namespace internal | |||||
| } // namespace base | |||||
| @ -1,42 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/thread_local.h" | |||||
| #include <windows.h> | |||||
| #include "base/logging.h" | |||||
| namespace base { | |||||
| namespace internal { | |||||
// static
void ThreadLocalPlatform::AllocateSlot(SlotType& slot) {
  slot = TlsAlloc();
  // TlsAlloc() returns TLS_OUT_OF_INDEXES on failure; treat that as fatal.
  CHECK_NE(slot, TLS_OUT_OF_INDEXES);
}
// static
void ThreadLocalPlatform::FreeSlot(SlotType& slot) {
  // Failure here indicates an invalid or already-freed slot; debug-fatal only.
  if (!TlsFree(slot)) {
    NOTREACHED() << "Failed to deallocate tls slot with TlsFree().";
  }
}
// static
void* ThreadLocalPlatform::GetValueFromSlot(SlotType& slot) {
  // Returns 0 (NULL) for threads that never called SetValueInSlot().
  return TlsGetValue(slot);
}
// static
void ThreadLocalPlatform::SetValueInSlot(SlotType& slot, void* value) {
  // Stores |value| in |slot| for the calling thread only; fatal on failure.
  if (!TlsSetValue(slot, value)) {
    LOG(FATAL) << "Failed to TlsSetValue().";
  }
}
| } // namespace internal | |||||
| } // namespace base | |||||
| @ -1,63 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #include "base/threading/thread_restrictions.h" | |||||
| // This entire file is compiled out in Release mode. | |||||
| #ifndef NDEBUG | |||||
| #include "base/lazy_instance.h" | |||||
| #include "base/logging.h" | |||||
| #include "base/threading/thread_local.h" | |||||
| namespace base { | |||||
namespace {

// Per-thread flags tracking what is *disallowed*; the default (false) means
// IO and singleton use are allowed.  The leaky traits presumably keep the
// instances alive through shutdown -- NOTE(review): confirm against
// LeakyLazyInstanceTraits.
LazyInstance<ThreadLocalBoolean, LeakyLazyInstanceTraits<ThreadLocalBoolean> >
    g_io_disallowed(LINKER_INITIALIZED);

LazyInstance<ThreadLocalBoolean, LeakyLazyInstanceTraits<ThreadLocalBoolean> >
    g_singleton_disallowed(LINKER_INITIALIZED);

}  // anonymous namespace
| // static | |||||
| bool ThreadRestrictions::SetIOAllowed(bool allowed) { | |||||
| bool previous_disallowed = g_io_disallowed.Get().Get(); | |||||
| g_io_disallowed.Get().Set(!allowed); | |||||
| return !previous_disallowed; | |||||
| } | |||||
| // static | |||||
| void ThreadRestrictions::AssertIOAllowed() { | |||||
| if (g_io_disallowed.Get().Get()) { | |||||
| LOG(FATAL) << | |||||
| "Function marked as IO-only was called from a thread that " | |||||
| "disallows IO! If this thread really should be allowed to " | |||||
| "make IO calls, adjust the call to " | |||||
| "base::ThreadRestrictions::SetIOAllowed() in this thread's " | |||||
| "startup."; | |||||
| } | |||||
| } | |||||
// static
bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
  // Stored inverted: the TLS flag records the *disallowed* state.
  bool previous_disallowed = g_singleton_disallowed.Get().Get();
  g_singleton_disallowed.Get().Set(!allowed);
  return !previous_disallowed;
}
// static
void ThreadRestrictions::AssertSingletonAllowed() {
  // Dies (in debug builds; this file is compiled out in release) if the
  // current thread has singleton use disallowed.
  if (g_singleton_disallowed.Get().Get()) {
    LOG(FATAL) << "LazyInstance/Singleton is not allowed to be used on this "
               << "thread. Most likely it's because this thread is not "
               << "joinable, so AtExitManager may have deleted the object "
               << "on shutdown, leading to a potential shutdown crash.";
  }
}
| } // namespace base | |||||
| #endif // NDEBUG | |||||
| @ -1,100 +0,0 @@ | |||||
| // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style license that can be | |||||
| // found in the LICENSE file. | |||||
| #ifndef BASE_THREADING_THREAD_RESTRICTIONS_H_ | |||||
| #define BASE_THREADING_THREAD_RESTRICTIONS_H_ | |||||
| #include "base/basictypes.h" | |||||
| namespace base { | |||||
| // Certain behavior is disallowed on certain threads. ThreadRestrictions helps | |||||
| // enforce these rules. Examples of such rules: | |||||
| // | |||||
| // * Do not do blocking IO (makes the thread janky) | |||||
| // * Do not access Singleton/LazyInstance (may lead to shutdown crashes) | |||||
| // | |||||
| // Here's more about how the protection works: | |||||
| // | |||||
| // 1) If a thread should not be allowed to make IO calls, mark it: | |||||
| // base::ThreadRestrictions::SetIOAllowed(false); | |||||
| // By default, threads *are* allowed to make IO calls. | |||||
| // In Chrome browser code, IO calls should be proxied to the File thread. | |||||
| // | |||||
| // 2) If a function makes a call that will go out to disk, check whether the | |||||
| // current thread is allowed: | |||||
| // base::ThreadRestrictions::AssertIOAllowed(); | |||||
| // | |||||
| // ThreadRestrictions does nothing in release builds; it is debug-only. | |||||
| // | |||||
| // Style tip: where should you put AssertIOAllowed checks? It's best | |||||
| // if you put them as close to the disk access as possible, at the | |||||
| // lowest level. This rule is simple to follow and helps catch all | |||||
| // callers. For example, if your function GoDoSomeBlockingDiskCall() | |||||
| // only calls other functions in Chrome and not fopen(), you should go | |||||
| // add the AssertIOAllowed checks in the helper functions. | |||||
class ThreadRestrictions {
 public:
  // Constructing a ScopedAllowIO temporarily allows IO for the current
  // thread.  Doing this is almost certainly always incorrect.
  class ScopedAllowIO {
   public:
    ScopedAllowIO() { previous_value_ = SetIOAllowed(true); }
    ~ScopedAllowIO() { SetIOAllowed(previous_value_); }
   private:
    // Whether IO is allowed when the ScopedAllowIO was constructed.
    bool previous_value_;

    DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
  };

  // Constructing a ScopedAllowSingleton temporarily allows singleton access
  // for the current thread.  Doing this is almost always incorrect.
  class ScopedAllowSingleton {
   public:
    ScopedAllowSingleton() { previous_value_ = SetSingletonAllowed(true); }
    ~ScopedAllowSingleton() { SetSingletonAllowed(previous_value_); }
   private:
    // Whether singleton use is allowed when the ScopedAllowSingleton was
    // constructed.
    bool previous_value_;

    DISALLOW_COPY_AND_ASSIGN(ScopedAllowSingleton);
  };

#ifndef NDEBUG
  // Set whether the current thread is allowed to make IO calls.
  // Threads start out in the *allowed* state.
  // Returns the previous value.
  static bool SetIOAllowed(bool allowed);

  // Check whether the current thread is allowed to make IO calls,
  // and fail fatally if not.  See the block comment above the class for
  // a discussion of where to add these checks.
  static void AssertIOAllowed();

  // Set whether the current thread can use singletons.  Returns the previous
  // value.
  static bool SetSingletonAllowed(bool allowed);

  // Check whether the current thread is allowed to use singletons (Singleton /
  // LazyInstance).  Fails fatally if not.
  static void AssertSingletonAllowed();
#else
  // In Release builds, inline the empty definitions of these functions so
  // that they can be compiled out.
  static bool SetIOAllowed(bool allowed) { return true; }
  static void AssertIOAllowed() {}
  static bool SetSingletonAllowed(bool allowed) { return true; }
  static void AssertSingletonAllowed() {}
#endif

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ThreadRestrictions);
};
| } // namespace base | |||||
| #endif // BASE_THREADING_THREAD_RESTRICTIONS_H_ | |||||