diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 55966de403482d372b64013e1158096a8b8c8cc1..0474038530c91a34d6ddfb11b14413268845de0d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,11 @@
+2016-05-27  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+
+	* config/aarch64/aarch64.c (aarch64_fusion_enabled_p): New function.
+	* config/aarch64/aarch64-protos.h (aarch64_fusion_enabled_p): Declare
+	prototype.
+	* config/aarch64/aarch64-simd.md (aarch64_crypto_aes<aesmc_op>v16qi):
+	Add "=w,0" alternative.  Enable it when AES/AESMC fusion is enabled.
+
 2016-05-27  Jiong Wang  <jiong.wang@arm.com>
 
 	PR target/63596
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 1b20cf9da9ef98119315bb7573c161bc71ed00ae..dcf1748d82f8672fd9100f48f66cf590228b88c1 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -287,6 +287,7 @@ bool aarch64_constant_address_p (rtx);
 bool aarch64_expand_movmem (rtx *);
 bool aarch64_float_const_zero_rtx_p (rtx);
 bool aarch64_function_arg_regno_p (unsigned);
+bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
 bool aarch64_gen_movmemqi (rtx *);
 bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
 bool aarch64_is_extend_from_extract (machine_mode, rtx, rtx);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 3318c2155f551c4ccd35188b2bedee5bf14ba2b0..fe1c22bfdf7e90fda391a91dea610b39b653bfa8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -5401,13 +5401,25 @@
   [(set_attr "type" "crypto_aese")]
 )
 
+;; When AES/AESMC fusion is enabled we want the register allocation to
+;; look like:
+;;    AESE Vn, _
+;;    AESMC Vn, Vn
+;; So prefer to tie operand 1 to operand 0 when fusing.
+
 (define_insn "aarch64_crypto_aes<aesmc_op>v16qi"
-  [(set (match_operand:V16QI 0 "register_operand" "=w")
-	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
+  [(set (match_operand:V16QI 0 "register_operand" "=w,w")
+	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0,w")]
 	 CRYPTO_AESMC))]
   "TARGET_SIMD && TARGET_CRYPTO"
   "aes<aesmc_op>\\t%0.16b, %1.16b"
-  [(set_attr "type" "crypto_aesmc")]
+  [(set_attr "type" "crypto_aesmc")
+   (set_attr_alternative "enabled"
+     [(if_then_else (match_test
+		       "aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)")
+	(const_string "yes")
+	(const_string "no"))
+      (const_string "yes")])]
 )
 
 ;; sha1
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 2315939ebfad3bad3a514b716a8f23df23971537..d2ac25ce339dbfb061260b8148e42a35f2d441e3 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -13181,6 +13181,14 @@ aarch_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
   return false;
 }
 
+/* Return true iff the instruction fusion described by OP is enabled.  */
+
+bool
+aarch64_fusion_enabled_p (enum aarch64_fusion_pairs op)
+{
+  return (aarch64_tune_params.fusible_ops & op) != 0;
+}
+
 /* If MEM is in the form of [base+offset], extract the two parts of
    address and set to BASE and OFFSET, otherwise return false after
    clearing BASE and OFFSET.  */
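
As a usage sketch (not part of the patch, added for illustration only): compiling
something like the following with an AArch64 GCC, using
-O2 -march=armv8-a+crypto and tuning for a core that fuses AESE/AESMC, should now
let the register allocator tie the AESMC input to the AESE result register.  The
intrinsics come from arm_neon.h; the function name aes_round is just illustrative.

  #include <arm_neon.h>

  /* One AES round step: AESE followed by AESMC.  With the patch, when
     AES/AESMC fusion is enabled the "0,w" alternative steers the
     allocator towards the fusible pair
        aese  v0.16b, v1.16b
        aesmc v0.16b, v0.16b  */
  uint8x16_t
  aes_round (uint8x16_t state, uint8x16_t key)
  {
    uint8x16_t t = vaeseq_u8 (state, key);   /* AESE  */
    return vaesmcq_u8 (t);                   /* AESMC */
  }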