From 2a5195d96565c6886fe9a686ecd4cbb2fa5c0e0b Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor <iant@google.com>
Date: Tue, 1 Jan 2013 16:13:20 +0000
Subject: [PATCH] re PR other/55536 (libbacktrace abort in backtrace_alloc at
 mmap.c:99 running btest)

	PR other/55536
	* mmap.c (backtrace_alloc): Don't call sync functions if not
	threaded.
	(backtrace_free): Likewise.

From-SVN: r194768
---
 libbacktrace/ChangeLog |  8 ++++++++
 libbacktrace/mmap.c    | 24 ++++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 9cc767b936b3..39802c311094 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,10 @@
+2013-01-01  Ian Lance Taylor  <iant@google.com>
+
+	PR other/55536
+	* mmap.c (backtrace_alloc): Don't call sync functions if not
+	threaded.
+	(backtrace_free): Likewise.
+
 2012-12-12  John David Anglin  <dave.anglin@nrc-cnrc.gc.ca>
 
 	* mmapio.c: Define MAP_FAILED if not defined.
@@ -26,6 +33,7 @@
 
 	PR other/55312
 	* configure.ac: Only add -Werror if building a target library.
+	* configure: Rebuild.
 
 2012-11-12  Ian Lance Taylor  <iant@google.com>
 	    Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index d3313c7cf1ec..a6c730ecd23f 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -84,6 +84,7 @@ backtrace_alloc (struct backtrace_state *state,
 		 void *data)
 {
   void *ret;
+  int locked;
   struct backtrace_freelist_struct **pp;
   size_t pagesize;
   size_t asksize;
@@ -96,7 +97,12 @@ backtrace_alloc (struct backtrace_state *state,
      using mmap.  __sync_lock_test_and_set returns the old state of
      the lock, so we have acquired it if it returns 0.  */
 
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
 	{
@@ -120,7 +126,8 @@ backtrace_alloc (struct backtrace_state *state,
 	    }
 	}
 
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 
   if (ret == NULL)
@@ -154,15 +161,24 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
 		void *data ATTRIBUTE_UNUSED)
 {
+  int locked;
+
   /* If we can acquire the lock, add the new space to the free list.
      If we can't acquire the lock, just leak the memory.
      __sync_lock_test_and_set returns the old state of the lock, so we
      have acquired it if it returns 0.  */
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       backtrace_free_locked (state, addr, size);
 
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 }
 
-- 
GitLab
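
Illustrative note (not part of the patch): both hunks above apply the same pattern — only touch the __sync builtins when state->threaded is set, and otherwise treat the allocator lock as already held, so single-threaded programs never call the atomic lock functions. The sketch below restates that pattern outside the patch context. The names sketch_state and sketch_with_lock are invented stand-ins for illustration, not the real libbacktrace definitions; only __sync_lock_test_and_set and __sync_lock_release are the actual GCC builtins used by mmap.c.

    /* Minimal sketch of the locking pattern introduced by this patch.
       Struct and function names are hypothetical; the real allocator
       state lives in struct backtrace_state.  */

    struct sketch_state
    {
      int threaded;     /* Nonzero if the program may be multi-threaded.  */
      int lock_alloc;   /* Lock word, used only when threaded.  */
    };

    void
    sketch_with_lock (struct sketch_state *state,
                      void (*fn) (struct sketch_state *))
    {
      int locked;

      /* Single-threaded: nothing can race with us, so treat the lock as
         acquired without calling the __sync builtins.  */
      if (!state->threaded)
        locked = 1;
      else
        /* __sync_lock_test_and_set returns the old value of the lock
           word, so 0 means we acquired the lock.  */
        locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

      if (locked)
        {
          fn (state);

          /* Release only if we actually took the atomic lock.  */
          if (state->threaded)
            __sync_lock_release (&state->lock_alloc);
        }

      /* If the lock is contended, the real code falls back gracefully:
         backtrace_alloc allocates directly with mmap, and backtrace_free
         simply leaks the block rather than blocking.  */
    }

In the patch itself, backtrace_alloc and backtrace_free each inline this check rather than sharing a helper, keeping the critical sections explicit at both call sites.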