diff --git a/gcc/config/riscv/thead.cc b/gcc/config/riscv/thead.cc
index 707d91076eb5fe8d3d5a8ab109aeb11f5984b348..baf74cffb5c63e1e372a370194c34252bab025f0 100644
--- a/gcc/config/riscv/thead.cc
+++ b/gcc/config/riscv/thead.cc
@@ -285,19 +285,27 @@ th_mempair_operands_p (rtx operands[4], bool load_p,
   if (MEM_VOLATILE_P (mem_1) || MEM_VOLATILE_P (mem_2))
     return false;
 
-  /* If we have slow unaligned access, we only accept aligned memory.  */
-  if (riscv_slow_unaligned_access_p
-      && known_lt (MEM_ALIGN (mem_1), GET_MODE_SIZE (mode) * BITS_PER_UNIT))
-    return false;
 
   /* Check if the addresses are in the form of [base+offset].  */
   bool reversed = false;
   if (!th_mempair_check_consecutive_mems (mode, &mem_1, &mem_2, &reversed))
     return false;
 
+  /* If necessary, reverse the local copy of the operands to simplify
+     testing of alignments and mempair operand.  */
+  if (reversed)
+    {
+      std::swap (mem_1, mem_2);
+      std::swap (reg_1, reg_2);
+    }
+
+  /* If we have slow unaligned access, we only accept aligned memory.  */
+  if (riscv_slow_unaligned_access_p
+      && known_lt (MEM_ALIGN (mem_1), GET_MODE_SIZE (mode) * BITS_PER_UNIT))
+    return false;
+
   /* The first memory accesses must be a mempair operand.  */
-  if ((!reversed && !th_mempair_operand_p (mem_1, mode))
-      || (reversed && !th_mempair_operand_p (mem_2, mode)))
+  if (!th_mempair_operand_p (mem_1, mode))
     return false;
 
   /* The operands must be of the same size.  */
diff --git a/gcc/testsuite/gcc.target/riscv/pr116720.c b/gcc/testsuite/gcc.target/riscv/pr116720.c
new file mode 100644
index 0000000000000000000000000000000000000000..0f795aba0bf11f55f0fc1bf219e13cce30fa4955
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/pr116720.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=rv32ixtheadmempair -mabi=ilp32 -mno-strict-align" } */
+
+struct a {
+  signed : 22;
+};
+volatile short b;
+int *c;
+void d(int e, struct a) {
+  b;
+  c = &e;
+}