diff --git a/libffi/ChangeLog b/libffi/ChangeLog
index 46d41da08131cc8609c1bb1988220bfb5c1dead7..45082049797d54a7268194d8fe352b0fc0d1ab50 100644
--- a/libffi/ChangeLog
+++ b/libffi/ChangeLog
@@ -1,3 +1,9 @@
+2004-11-22  Eric Botcazou  <ebotcazou@libertysurf.fr>
+
+	* src/sparc/ffi.c (ffi_prep_closure): Align doubles and 64-bit integers
+	on an 8-byte boundary.
+	* src/sparc/v8.S (ffi_closure_v8): Reserve frame space for arguments.
+
 2004-10-27  Richard Earnshaw  <rearnsha@arm.com>
 
 	* src/arm/ffi.c (ffi_prep_cif_machdep): Handle functions that return
diff --git a/libffi/src/sparc/ffi.c b/libffi/src/sparc/ffi.c
index ad15bee986c8891cc62dc00a7c12d39e506d6814..b83d63dedc775d1855e8f9438662a82bafcf9729 100644
--- a/libffi/src/sparc/ffi.c
+++ b/libffi/src/sparc/ffi.c
@@ -470,7 +470,7 @@ ffi_prep_closure (ffi_closure* closure,
 
 int
 ffi_closure_sparc_inner_v8(ffi_closure *closure,
-  void *rvalue, unsigned long *gpr)
+  void *rvalue, unsigned long *gpr, unsigned long *scratch)
 {
   ffi_cif *cif;
   ffi_type **arg_types;
@@ -505,6 +505,19 @@ ffi_closure_sparc_inner_v8(ffi_closure *closure,
 	  /* Straight copy of invisible reference.  */
 	  avalue[i] = (void *)gpr[argn++];
 	}
+      else if ((arg_types[i]->type == FFI_TYPE_DOUBLE
+	       || arg_types[i]->type == FFI_TYPE_SINT64
+	       || arg_types[i]->type == FFI_TYPE_UINT64)
+	       /* gpr is 8-byte aligned.  */
+	       && (argn % 2) != 0)
+	{
+	  /* Align on an 8-byte boundary.  */
+	  scratch[0] = gpr[argn];
+	  scratch[1] = gpr[argn+1];
+	  avalue[i] = scratch;
+	  scratch -= 2;
+	  argn += 2;
+	}
       else
 	{
 	  /* Always right-justify.  */
diff --git a/libffi/src/sparc/v8.S b/libffi/src/sparc/v8.S
index aaa7be7b4c8e1ee6f6c5b4e7d14cb87759ed6900..709423ce906aed2a7c7bf1bd16e529a78f3e8989 100644
--- a/libffi/src/sparc/v8.S
+++ b/libffi/src/sparc/v8.S
@@ -115,7 +115,15 @@ ffi_closure_v8:
 		.register	%g2, #scratch
 #endif
 .LLFB2:
-	save	%sp, -STACKFRAME, %sp
+	! Reserve frame space for all arguments in case
+	! we need to align them on an 8-byte boundary.
+	ld	[%g2+FFI_TRAMPOLINE_SIZE], %g1
+	ld	[%g1+4], %g1
+	sll	%g1, 3, %g1
+	add	%g1, STACKFRAME, %g1
+	! %g1 == STACKFRAME + 8*nargs
+	neg	%g1
+	save	%sp, %g1, %sp
 .LLCFI1:
 
 	! Store all of the potential argument registers in va_list format.
@@ -129,8 +137,9 @@ ffi_closure_v8:
 	! Call ffi_closure_sparc_inner to do the bulk of the work.
 	mov	%g2, %o0
 	add	%fp, -8, %o1
+	add	%fp,  64, %o2
 	call	ffi_closure_sparc_inner_v8
-	 add	%fp,  64, %o2
+	 add	%fp, -16, %o3
 
 	! Load up the return value in the proper type.
 	! See ffi_prep_cif_machdep for the list of cases.
@@ -157,6 +166,7 @@ ffi_closure_v8:
 	be	done2
 
 	! FFI_TYPE_SINT64
+	! FFI_TYPE_UINT64
 	ld	[%fp-4], %i1
 
 integer: