diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index 065fcd2f956e2d62f8394ed0015be40e4420bfa9..7be8463d32b392aa76b5ccf0040c718210fb3a84 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -9361,6 +9361,11 @@ build_binary_op_trapv (enum tree_code code, tree gnu_type, tree left,
   /* If no operand is a constant, we use the generic implementation.  */
   if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (rhs) != INTEGER_CST)
     {
+      /* First convert the operands to the result type, as build_binary_op
+	 does.  For biased types, this is where the bias is made explicit.  */
+      lhs = convert (gnu_type, lhs);
+      rhs = convert (gnu_type, rhs);
+
       /* Never inline a 64-bit mult for a 32-bit target, it's way too long.  */
       if (code == MULT_EXPR && precision == 64 && BITS_PER_WORD < 64)
 	{
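
The hunk above converts both operands to the result type before falling back to the generic overflow-checked code, so that the bias of a biased operand is made explicit (added back) before the arithmetic is performed.  A minimal standalone C sketch of that arithmetic, where BIAS, encode and decode are hypothetical helpers modelling GNAT's biased representation rather than GCC entities, could look like this:

/* Sketch only: a biased type with logical range 1 .. 64 stored in 6 bits
   as "logical - 1", like Biased_T in the testcase below.  */

#include <stdio.h>

#define BIAS 1U

/* Map a logical value to its biased stored form.  */
static unsigned encode (unsigned logical) { return logical - BIAS; }

/* Map a biased stored value back to its logical value.  */
static unsigned decode (unsigned stored) { return stored + BIAS; }

int main (void)
{
  unsigned x = encode (1), y = encode (1);  /* X := 1; Y := 1; */

  /* Operating on the stored values directly computes
     (1 - BIAS) + (1 - BIAS), which reads back as 1 instead of 2:
     the result is short by one bias.  */
  printf ("raw stored add : %u\n", decode (x + y));

  /* Converting each operand to the unbiased result type first, which is
     what the convert (gnu_type, ...) calls above do, gives the expected 2.  */
  printf ("converted add  : %u\n", decode (x) + decode (y));

  return 0;
}
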
diff --git a/gcc/testsuite/gnat.dg/bias2.adb b/gcc/testsuite/gnat.dg/bias2.adb
new file mode 100644
index 0000000000000000000000000000000000000000..a32e9a337ad7ea8a5fb236bdeeb0a0dd251a4ca5
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/bias2.adb
@@ -0,0 +1,33 @@
+-- { dg-do run }
+
+procedure Bias2 is
+
+  type Biased_T is range 1 .. 2 ** 6;
+  for Biased_T'Size use 6;  --  { dg-warning "biased representation" }
+  X, Y : Biased_T;
+
+begin
+  X := 1;
+  Y := 1;
+  if X + Y /= 2 then
+    raise Program_Error;
+  end if;
+
+  X := 2;
+  Y := 1;
+  if X - Y /= 1 then
+    raise Program_Error;
+  end if;
+
+  X := 2;
+  Y := 3;
+  if X * Y /= 6 then
+    raise Program_Error;
+  end if;
+
+  X := 24;
+  Y := 3;
+  if X / Y /= 8 then
+    raise Program_Error;
+  end if;
+end Bias2;
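
The new test exercises addition, subtraction, multiplication and division on a biased 6-bit type with operands that are not compile-time constants, so the checked operations should go through the generic path patched above.  In a configured build tree it can typically be run on its own with the usual DejaGnu filter, e.g.:

  make -C gcc check-gnat RUNTESTFLAGS="dg.exp=bias2.adb"

(the exact invocation depends on the local build setup).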