summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlex Coplan <alex.coplan@arm.com>2020-05-11 15:18:46 +0100
committerRichard Sandiford <richard.sandiford@arm.com>2020-05-11 15:18:46 +0100
commitd572ad49217c09ca09e382774fdc6c407db4fc20 (patch)
tree2ac5c7daf04199adf84fd3a406a8ead415ec738e
parentfa853214b8f62d9df04e9bd956d6a8f0e28fd5a7 (diff)
[PATCH] aarch64: prefer using csinv, csneg in zero extend contexts
Given the C code: unsigned long long inv(unsigned a, unsigned b, unsigned c) { return a ? b : ~c; } Prior to this patch, AArch64 GCC at -O2 generates: inv: cmp w0, 0 mvn w2, w2 csel w0, w1, w2, ne ret and after applying the patch, we get: inv: cmp w0, 0 csinv w0, w1, w2, ne ret The new pattern also catches the optimization for the symmetric case where the body of foo reads a ? ~b : c. Similarly, with the following code: unsigned long long neg(unsigned a, unsigned b, unsigned c) { return a ? b : -c; } GCC at -O2 previously gave: neg: cmp w0, 0 neg w2, w2 csel w0, w1, w2, ne but now gives: neg: cmp w0, 0 csneg w0, w1, w2, ne ret with the corresponding code for the symmetric case as above. 2020-05-11 Alex Coplan <alex.coplan@arm.com> gcc/ * config/aarch64/aarch64.c (aarch64_if_then_else_costs): Add case to correctly calculate cost for new pattern (*csinv3_uxtw_insn3). * config/aarch64/aarch64.md (*csinv3_utxw_insn1): New. (*csinv3_uxtw_insn2): New. (*csinv3_uxtw_insn3): New. * config/aarch64/iterators.md (neg_not_cs): New. gcc/testsuite/ * gcc.target/aarch64/csinv-neg.c: New test.
-rw-r--r--gcc/ChangeLog9
-rw-r--r--gcc/config/aarch64/aarch64.c7
-rw-r--r--gcc/config/aarch64/aarch64.md38
-rw-r--r--gcc/config/aarch64/iterators.md3
-rw-r--r--gcc/testsuite/ChangeLog4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/csinv-neg.c104
6 files changed, 165 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0a98c7441e1..c4856f422a3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2020-05-11 Alex Coplan <alex.coplan@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_if_then_else_costs): Add case
+ to correctly calculate cost for new pattern (*csinv3_uxtw_insn3).
+ * config/aarch64/aarch64.md (*csinv3_uxtw_insn1): New.
+ (*csinv3_uxtw_insn2): New.
+ (*csinv3_uxtw_insn3): New.
+ * config/aarch64/iterators.md (neg_not_cs): New.
+
2020-05-11 Uroš Bizjak <ubizjak@gmail.com>
PR target/95046
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index e92c7e69fcb..434e095cb66 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -11695,6 +11695,13 @@ aarch64_if_then_else_costs (rtx op0, rtx op1, rtx op2, int *cost, bool speed)
op1 = XEXP (op1, 0);
op2 = XEXP (op2, 0);
}
+ else if (GET_CODE (op1) == ZERO_EXTEND && op2 == const0_rtx)
+ {
+ inner = XEXP (op1, 0);
+ if (GET_CODE (inner) == NEG || GET_CODE (inner) == NOT)
+ /* CSINV/NEG with zero extend + const 0 (*csinv3_uxtw_insn3). */
+ op1 = XEXP (inner, 0);
+ }
*cost += rtx_cost (op1, VOIDmode, IF_THEN_ELSE, 1, speed);
*cost += rtx_cost (op2, VOIDmode, IF_THEN_ELSE, 2, speed);
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index ff15505d455..b2cfd015530 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -4391,6 +4391,44 @@
[(set_attr "type" "csel")]
)
+(define_insn "*csinv3_uxtw_insn1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (if_then_else:DI
+ (match_operand 1 "aarch64_comparison_operation" "")
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))
+ (zero_extend:DI
+ (NEG_NOT:SI (match_operand:SI 3 "register_operand" "r")))))]
+ ""
+ "cs<neg_not_cs>\\t%w0, %w2, %w3, %m1"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "*csinv3_uxtw_insn2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (if_then_else:DI
+ (match_operand 1 "aarch64_comparison_operation" "")
+ (zero_extend:DI
+ (NEG_NOT:SI (match_operand:SI 2 "register_operand" "r")))
+ (zero_extend:DI
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "cs<neg_not_cs>\\t%w0, %w3, %w2, %M1"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "*csinv3_uxtw_insn3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (if_then_else:DI
+ (match_operand 1 "aarch64_comparison_operation" "")
+ (zero_extend:DI
+ (NEG_NOT:SI (match_operand:SI 2 "register_operand" "r")))
+ (const_int 0)))]
+ ""
+ "cs<neg_not_cs>\\t%w0, wzr, %w2, %M1"
+ [(set_attr "type" "csel")]
+)
+
;; If X can be loaded by a single CNT[BHWD] instruction,
;;
;; A = UMAX (B, X)
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 8e434389e59..a568cf21b99 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1932,6 +1932,9 @@
;; Operation names for negate and bitwise complement.
(define_code_attr neg_not_op [(neg "neg") (not "not")])
+;; csinv, csneg insn suffixes.
+(define_code_attr neg_not_cs [(neg "neg") (not "inv")])
+
;; Similar, but when the second operand is inverted.
(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index dace5ae17c5..ac40f2ef474 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2020-05-11 Alex Coplan <alex.coplan@arm.com>
+
+ * gcc.target/aarch64/csinv-neg.c: New test.
+
2020-05-11 Kelvin Nilsen <kelvin@gcc.gnu.org>
* gcc.target/powerpc/dg-future-0.c: New.
diff --git a/gcc/testsuite/gcc.target/aarch64/csinv-neg.c b/gcc/testsuite/gcc.target/aarch64/csinv-neg.c
new file mode 100644
index 00000000000..cc64b4094d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/csinv-neg.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+/*
+** inv1:
+** cmp w0, 0
+** csinv w0, w1, w2, ne
+** ret
+*/
+unsigned long long
+inv1(unsigned a, unsigned b, unsigned c)
+{
+ return a ? b : ~c;
+}
+
+/*
+** inv1_local:
+** cmp w0, 0
+** csinv w0, w1, w2, ne
+** ret
+*/
+unsigned long long
+inv1_local(unsigned a, unsigned b, unsigned c)
+{
+ unsigned d = ~c;
+ return a ? b : d;
+}
+
+/*
+** inv_zero1:
+** cmp w0, 0
+** csinv w0, wzr, w1, ne
+** ret
+*/
+unsigned long long
+inv_zero1(unsigned a, unsigned b)
+{
+ return a ? 0 : ~b;
+}
+
+/*
+** inv_zero2:
+** cmp w0, 0
+** csinv w0, wzr, w1, eq
+** ret
+*/
+unsigned long long
+inv_zero2(unsigned a, unsigned b)
+{
+ return a ? ~b : 0;
+}
+
+
+/*
+** inv2:
+** cmp w0, 0
+** csinv w0, w2, w1, eq
+** ret
+*/
+unsigned long long
+inv2(unsigned a, unsigned b, unsigned c)
+{
+ return a ? ~b : c;
+}
+
+/*
+** inv2_local:
+** cmp w0, 0
+** csinv w0, w2, w1, eq
+** ret
+*/
+unsigned long long
+inv2_local(unsigned a, unsigned b, unsigned c)
+{
+ unsigned d = ~b;
+ return a ? d : c;
+}
+
+/*
+** neg1:
+** cmp w0, 0
+** csneg w0, w1, w2, ne
+** ret
+*/
+unsigned long long
+neg1(unsigned a, unsigned b, unsigned c)
+{
+ return a ? b : -c;
+}
+
+
+/*
+** neg2:
+** cmp w0, 0
+** csneg w0, w2, w1, eq
+** ret
+*/
+unsigned long long
+neg2(unsigned a, unsigned b, unsigned c)
+{
+ return a ? -b : c;
+}
+
+/* { dg-final { check-function-bodies "**" "" "" } } */