diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 70e5b571efcb396529d68d1edba5706ecbedd318..f361e2aaf088a8c41b9d934b1739b3f1b29869fc 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,7 @@ +2012-10-14 Jason Merrill <jason@redhat.com> + + * g++.dg/tls/thread_local7g.C: Require tls_native. + 2012-10-14 Jason Merrill <jason@redhat.com> * g++.dg/cpp0x/inh-ctor1.C: New. diff --git a/gcc/testsuite/g++.dg/tls/thread_local7g.C b/gcc/testsuite/g++.dg/tls/thread_local7g.C index 6960598173aa3e34b2bed5202e86c9bb534ab61e..3479aeb31fa7fe8ecbaf9267797fa1754a211ca8 100644 --- a/gcc/testsuite/g++.dg/tls/thread_local7g.C +++ b/gcc/testsuite/g++.dg/tls/thread_local7g.C @@ -3,7 +3,7 @@ // { dg-require-alias } // The reference temp should be TLS, not normal data. -// { dg-final { scan-assembler-not "\\.data" } } +// { dg-final { scan-assembler-not "\\.data" { target tls_native } } } thread_local int&& ir = 42; diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog index e23ef31d55024a611d392fb350664acae2e68f43..e6c16dbc9302fe7ccfb8ac217de96a6a9591675b 100644 --- a/libstdc++-v3/ChangeLog +++ b/libstdc++-v3/ChangeLog @@ -1,3 +1,9 @@ +2012-10-14 Jason Merrill <jason@redhat.com> + + PR target/54908 + * libsupc++/atexit_thread.cc: Rewrite to keep the cleanup list + with get/setspecific. Destroy the key on dlclose. + 2012-10-12 Edward Smith-Rowland <3dw4rd@verizon.net> * include/ext/random: Add __gnu_cxx::arcsine_distribution<> diff --git a/libstdc++-v3/libsupc++/atexit_thread.cc b/libstdc++-v3/libsupc++/atexit_thread.cc index 5e47708d93472884eed9246d4bef11ef1dc25999..95bdcf09dec079554b5dca6a5ff6a8d6c594b86d 100644 --- a/libstdc++-v3/libsupc++/atexit_thread.cc +++ b/libstdc++-v3/libsupc++/atexit_thread.cc @@ -27,109 +27,92 @@ #include "bits/gthr.h" namespace { - // Data structure for the list of destructors: Singly-linked list - // of arrays. - class list + // One element in a singly-linked stack of cleanups.
+ struct elt { - struct elt - { - void *object; - void (*destructor)(void *); - }; - - static const int max_nelts = 32; - - list *next; - int nelts; - elt array[max_nelts]; - - elt *allocate_elt(); - public: - void run(); - static void run(void *p); - int add_elt(void (*)(void *), void *); + void (*destructor)(void *); + void *object; + elt *next; }; - // Return the address of an open slot. - list::elt * - list::allocate_elt() - { - if (nelts < max_nelts) - return &array[nelts++]; - if (!next) - next = new (std::nothrow) list(); - if (!next) - return 0; - return next->allocate_elt(); - } + // Keep a per-thread list of cleanups in gthread_key storage. + __gthread_key_t key; + // But also support non-threaded mode. + elt *single_thread; - // Run all the cleanups in the list. - void - list::run() + // Run the specified stack of cleanups. + void run (void *p) { - for (int i = nelts - 1; i >= 0; --i) - array[i].destructor (array[i].object); - if (next) - next->run(); + elt *e = static_cast<elt*>(p); + for (; e; e = e->next) + e->destructor (e->object); } - // Static version to use as a callback to __gthread_key_create. - void - list::run(void *p) + // Run the stack of cleanups for the current thread. + void run () { - static_cast<list *>(p)->run(); + void *e; + if (__gthread_active_p ()) + e = __gthread_getspecific (key); + else + e = single_thread; + run (e); } - // The list of cleanups is per-thread. - thread_local list first; - - // The pthread data structures for actually running the destructors at - // thread exit are shared. The constructor of the thread-local sentinel - // object in add_elt performs the initialization. - __gthread_key_t key; - __gthread_once_t once = __GTHREAD_ONCE_INIT; - void run_current () { first.run(); } + // Initialize the key for the cleanup stack. We use a static local for + // key init/delete rather than atexit so that delete is run on dlclose. 
void key_init() { - __gthread_key_create (&key, list::run); + struct key_s { + key_s() { __gthread_key_create (&key, run); } + ~key_s() { __gthread_key_delete (key); } + }; + static key_s ks; // Also make sure the destructors are run by std::exit. // FIXME TLS cleanups should run before static cleanups and atexit // cleanups. - std::atexit (run_current); + std::atexit (run); } - struct sentinel - { - sentinel() +} + +extern "C" int +__cxxabiv1::__cxa_thread_atexit (void (*dtor)(void *), void *obj, void */*dso_handle*/) + _GLIBCXX_NOTHROW +{ + // Do this initialization once. + if (__gthread_active_p ()) + { + // When threads are active use __gthread_once. + static __gthread_once_t once = __GTHREAD_ONCE_INIT; + __gthread_once (&once, key_init); + } + else { - if (__gthread_active_p ()) + // And when threads aren't active use a static local guard. + static bool queued; + if (!queued) { - __gthread_once (&once, key_init); - __gthread_setspecific (key, &first); + queued = true; + std::atexit (run); } - else - std::atexit (run_current); } - }; - // Actually insert an element. - int - list::add_elt(void (*dtor)(void *), void *obj) - { - thread_local sentinel s; - elt *e = allocate_elt (); - if (!e) - return -1; - e->object = obj; - e->destructor = dtor; - return 0; - } -} + elt *first; + if (__gthread_active_p ()) + first = static_cast<elt*>(__gthread_getspecific (key)); + else + first = single_thread; -namespace __cxxabiv1 -{ - extern "C" int - __cxa_thread_atexit (void (*dtor)(void *), void *obj, void */*dso_handle*/) - _GLIBCXX_NOTHROW - { - return first.add_elt (dtor, obj); - } + elt *new_elt = new (std::nothrow) elt; + if (!new_elt) + return -1; + new_elt->destructor = dtor; + new_elt->object = obj; + new_elt->next = first; + + if (__gthread_active_p ()) + __gthread_setspecific (key, new_elt); + else + single_thread = new_elt; + + return 0; }