-rw-r--r--  gcc/ChangeLog      | 18
-rw-r--r--  gcc/combine.c      | 28
-rw-r--r--  gcc/expr.c         | 17
-rw-r--r--  gcc/fwprop.c       |  7
-rw-r--r--  gcc/loop-iv.c      |  6
-rw-r--r--  gcc/optabs.c       |  8
-rw-r--r--  gcc/postreload.c   |  8
-rw-r--r--  gcc/recog.c        |  3
-rw-r--r--  gcc/simplify-rtx.c | 38
9 files changed, 84 insertions(+), 49 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 841fe4b726c..54a4cf995c4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -2,6 +2,24 @@
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+ * combine.c (find_split_point): Add is_a <scalar_int_mode> checks.
+ (make_compound_operation_int): Likewise.
+ (change_zero_ext): Likewise.
+ * expr.c (convert_move): Likewise.
+ (convert_modes): Likewise.
+ * fwprop.c (forward_propagate_subreg): Likewise.
+ * loop-iv.c (get_biv_step_1): Likewise.
+ * optabs.c (widen_operand): Likewise.
+ * postreload.c (move2add_valid_value_p): Likewise.
+ * recog.c (simplify_while_replacing): Likewise.
+ * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+ (simplify_binary_operation_1): Likewise. Remove redundant
+ mode equality check.
+
+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
* combine.c (combine_simplify_rtx): Add checks for
is_a <scalar_int_mode>.
(simplify_if_then_else): Likewise.
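
The recurring change throughout this series is the checked-conversion idiom
sketched below. This is a minimal sketch assuming GCC's internal headers; the
helper name get_int_precision is invented for illustration, while is_a,
scalar_int_mode, GET_MODE and GET_MODE_PRECISION are the real internal API the
patch uses.

/* Hypothetical helper, illustrative only.  Pre-patch code typically read
   GET_MODE (x) into a plain machine_mode and applied integer-only queries
   unconditionally, silently accepting vector and floating-point modes.
   The post-patch idiom proves the mode is a scalar integer first.  */

static bool
get_int_precision (rtx x, unsigned int *prec)
{
  scalar_int_mode imode;

  /* Returns false (leaving IMODE untouched) unless GET_MODE (x)
     really is a scalar integer mode.  */
  if (!is_a <scalar_int_mode> (GET_MODE (x), &imode))
    return false;

  *prec = GET_MODE_PRECISION (imode);
  return true;
}
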
diff --git a/gcc/combine.c b/gcc/combine.c
index 65877e9daf7..accd2540881 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -4793,7 +4793,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
HOST_WIDE_INT pos = 0;
int unsignedp = 0;
rtx inner = NULL_RTX;
- scalar_int_mode inner_mode;
+ scalar_int_mode mode, inner_mode;
/* First special-case some codes. */
switch (code)
@@ -5047,7 +5047,9 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
case SIGN_EXTRACT:
case ZERO_EXTRACT:
- if (CONST_INT_P (XEXP (SET_SRC (x), 1))
+ if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
+ &inner_mode)
+ && CONST_INT_P (XEXP (SET_SRC (x), 1))
&& CONST_INT_P (XEXP (SET_SRC (x), 2)))
{
inner = XEXP (SET_SRC (x), 0);
@@ -5055,7 +5057,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
pos = INTVAL (XEXP (SET_SRC (x), 2));
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
+ pos = GET_MODE_PRECISION (inner_mode) - len - pos;
unsignedp = (code == ZERO_EXTRACT);
}
break;
@@ -5065,10 +5067,9 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
}
if (len && pos >= 0
- && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
+ && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
+ && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
{
- machine_mode mode = GET_MODE (SET_SRC (x));
-
/* For unsigned, we have a choice of a shift followed by an
AND or two shifts. Use two shifts for field sizes where the
constant might be too large. We assume here that we can
@@ -7846,6 +7847,7 @@ make_compound_operation_int (machine_mode mode, rtx *x_ptr,
rtx new_rtx = 0;
int i;
rtx tem;
+ scalar_int_mode inner_mode;
bool equality_comparison = false;
if (in_code == EQ)
@@ -7954,11 +7956,12 @@ make_compound_operation_int (machine_mode mode, rtx *x_ptr,
/* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
else if (GET_CODE (XEXP (x, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
+ &inner_mode)
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
{
rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
- machine_mode inner_mode = GET_MODE (inner_x0);
new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
new_rtx = make_extraction (inner_mode, new_rtx, 0,
XEXP (inner_x0, 1),
@@ -8148,14 +8151,14 @@ make_compound_operation_int (machine_mode mode, rtx *x_ptr,
/* If the SUBREG is masking of a logical right shift,
make an extraction. */
if (GET_CODE (inner) == LSHIFTRT
+ && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
&& CONST_INT_P (XEXP (inner, 1))
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
- && (UINTVAL (XEXP (inner, 1))
- < GET_MODE_PRECISION (GET_MODE (inner)))
+ && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
&& subreg_lowpart_p (x))
{
new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
- int width = GET_MODE_PRECISION (GET_MODE (inner))
+ int width = GET_MODE_PRECISION (inner_mode)
- INTVAL (XEXP (inner, 1));
if (width > mode_width)
width = mode_width;
@@ -11358,15 +11361,16 @@ change_zero_ext (rtx pat)
maybe_swap_commutative_operands (**iter);
rtx *dst = &SET_DEST (pat);
+ scalar_int_mode mode;
if (GET_CODE (*dst) == ZERO_EXTRACT
&& REG_P (XEXP (*dst, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
&& CONST_INT_P (XEXP (*dst, 1))
&& CONST_INT_P (XEXP (*dst, 2)))
{
rtx reg = XEXP (*dst, 0);
int width = INTVAL (XEXP (*dst, 1));
int offset = INTVAL (XEXP (*dst, 2));
- machine_mode mode = GET_MODE (reg);
int reg_width = GET_MODE_PRECISION (mode);
if (BITS_BIG_ENDIAN)
offset = reg_width - width - offset;
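
The BITS_BIG_ENDIAN adjustment in change_zero_ext above is easy to sanity-check
in isolation. A standalone illustration in plain C++ (the numbers are chosen
for the example, not taken from the patch):

#include <cassert>

int main ()
{
  /* A WIDTH-bit field at little-endian bit OFFSET in a REG_WIDTH-bit
     register sits at REG_WIDTH - WIDTH - OFFSET under big-endian bit
     numbering: the flip applied above.  */
  const int reg_width = 32, width = 8, offset = 4;
  const int be_offset = reg_width - width - offset;
  assert (be_offset == 20);

  /* The flip is an involution: applying it twice restores OFFSET.  */
  assert (reg_width - width - be_offset == offset);
  return 0;
}
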
diff --git a/gcc/expr.c b/gcc/expr.c
index 225b8c2925e..ce5f42e0ea6 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -239,11 +239,14 @@ convert_move (rtx to, rtx from, int unsignedp)
the required extension, strip it. We don't handle such SUBREGs as
TO here. */
- if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from)
+ scalar_int_mode to_int_mode;
+ if (GET_CODE (from) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (from)
+ && is_a <scalar_int_mode> (to_mode, &to_int_mode)
&& (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (from)))
- >= GET_MODE_PRECISION (to_mode))
+ >= GET_MODE_PRECISION (to_int_mode))
&& SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
- from = gen_lowpart (to_mode, from), from_mode = to_mode;
+ from = gen_lowpart (to_int_mode, from), from_mode = to_int_mode;
gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
@@ -635,10 +638,12 @@ convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
/* If FROM is a SUBREG that indicates that we have already done at least
the required extension, strip it. */
- if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
- && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode)
+ if (GET_CODE (x) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (x)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (int_mode)
&& SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
- x = gen_lowpart (mode, SUBREG_REG (x));
+ x = gen_lowpart (int_mode, SUBREG_REG (x));
if (GET_MODE (x) != VOIDmode)
oldmode = GET_MODE (x);
diff --git a/gcc/fwprop.c b/gcc/fwprop.c
index cab801e5cc7..ca997490cf1 100644
--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -1095,6 +1095,7 @@ forward_propagate_subreg (df_ref use, rtx_insn *def_insn, rtx def_set)
rtx use_reg = DF_REF_REG (use);
rtx_insn *use_insn;
rtx src;
+ scalar_int_mode int_use_mode, src_mode;
/* Only consider subregs... */
machine_mode use_mode = GET_MODE (use_reg);
@@ -1136,17 +1137,19 @@ forward_propagate_subreg (df_ref use, rtx_insn *def_insn, rtx def_set)
definition of Y or, failing that, allow A to be deleted after
reload through register tying. Introducing more uses of Y
prevents both optimisations. */
- else if (subreg_lowpart_p (use_reg))
+ else if (is_a <scalar_int_mode> (use_mode, &int_use_mode)
+ && subreg_lowpart_p (use_reg))
{
use_insn = DF_REF_INSN (use);
src = SET_SRC (def_set);
if ((GET_CODE (src) == ZERO_EXTEND
|| GET_CODE (src) == SIGN_EXTEND)
+ && is_a <scalar_int_mode> (GET_MODE (src), &src_mode)
&& REG_P (XEXP (src, 0))
&& REGNO (XEXP (src, 0)) >= FIRST_PSEUDO_REGISTER
&& GET_MODE (XEXP (src, 0)) == use_mode
&& !free_load_extend (src, def_insn)
- && (targetm.mode_rep_extended (use_mode, GET_MODE (src))
+ && (targetm.mode_rep_extended (int_use_mode, src_mode)
!= (int) GET_CODE (src))
&& all_uses_available_at (def_insn, use_insn))
return try_fwprop_subst (use, DF_REF_LOC (use), XEXP (src, 0),
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index 896fe0b1a00..61074e3fb1a 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -739,9 +739,9 @@ get_biv_step_1 (df_ref def, rtx reg,
if (GET_CODE (next) == SUBREG)
{
- machine_mode amode = GET_MODE (next);
-
- if (GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
+ scalar_int_mode amode;
+ if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
+ || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
return false;
*inner_mode = amode;
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 02cd1082a37..1dfb545d251 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -195,6 +195,7 @@ widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
int unsignedp, int no_extend)
{
rtx result;
+ scalar_int_mode int_mode;
/* If we don't have to extend and this is a constant, return it. */
if (no_extend && GET_MODE (op) == VOIDmode)
@@ -204,19 +205,20 @@ widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
extend since it will be more efficient to do so unless the signedness of
a promoted object differs from our extension. */
if (! no_extend
+ || !is_a <scalar_int_mode> (mode, &int_mode)
|| (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
return convert_modes (mode, oldmode, op, unsignedp);
/* If MODE is no wider than a single word, we return a lowpart or paradoxical
SUBREG. */
- if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
- return gen_lowpart (mode, force_reg (GET_MODE (op), op));
+ if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
+ return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
/* Otherwise, get an object of MODE, clobber it, and set the low-order
part to OP. */
- result = gen_reg_rtx (mode);
+ result = gen_reg_rtx (int_mode);
emit_clobber (result);
emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
return result;
diff --git a/gcc/postreload.c b/gcc/postreload.c
index f76321d27d9..38948dcf9c5 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -1699,14 +1699,16 @@ move2add_valid_value_p (int regno, machine_mode mode)
if (mode != reg_mode[regno])
{
- if (!MODES_OK_FOR_MOVE2ADD (mode, reg_mode[regno]))
+ scalar_int_mode old_mode;
+ if (!is_a <scalar_int_mode> (reg_mode[regno], &old_mode)
+ || !MODES_OK_FOR_MOVE2ADD (mode, old_mode))
return false;
/* The value loaded into regno in reg_mode[regno] is also valid in
mode after truncation only if (REG:mode regno) is the lowpart of
(REG:reg_mode[regno] regno). Now, for big endian, the starting
regno of the lowpart might be different. */
- int s_off = subreg_lowpart_offset (mode, reg_mode[regno]);
- s_off = subreg_regno_offset (regno, reg_mode[regno], s_off, mode);
+ int s_off = subreg_lowpart_offset (mode, old_mode);
+ s_off = subreg_regno_offset (regno, old_mode, s_off, mode);
if (s_off != 0)
/* We could in principle adjust regno, check reg_mode[regno] to be
BLKmode, and return s_off to the caller (vs. -1 for failure),
diff --git a/gcc/recog.c b/gcc/recog.c
index 4467bf73a6f..15476db87d1 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -560,6 +560,7 @@ simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx new_rtx = NULL_RTX;
+ scalar_int_mode is_mode;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
@@ -655,6 +656,7 @@ simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
happen, we might just fail in some cases). */
if (MEM_P (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &is_mode)
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2))
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
@@ -662,7 +664,6 @@ simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
&& !MEM_VOLATILE_P (XEXP (x, 0)))
{
machine_mode wanted_mode = VOIDmode;
- machine_mode is_mode = GET_MODE (XEXP (x, 0));
int pos = INTVAL (XEXP (x, 2));
if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index c3c6a80a189..8473190b7a0 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -925,7 +925,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
enum rtx_code reversed;
rtx temp;
- scalar_int_mode inner, int_mode;
+ scalar_int_mode inner, int_mode, op0_mode;
switch (code)
{
@@ -1637,21 +1637,19 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
(zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
(and:SI (reg:SI) (const_int 63)). */
if (GET_CODE (op) == SUBREG
- && GET_MODE_PRECISION (GET_MODE (op))
- < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_PRECISION (mode)
- >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
+ && GET_MODE_PRECISION (GET_MODE (op)) < GET_MODE_PRECISION (op0_mode)
+ && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
&& subreg_lowpart_p (op)
- && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
+ && (nonzero_bits (SUBREG_REG (op), op0_mode)
& ~GET_MODE_MASK (GET_MODE (op))) == 0)
{
- if (GET_MODE_PRECISION (mode)
- == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
+ if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
return SUBREG_REG (op);
- return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
- GET_MODE (SUBREG_REG (op)));
+ return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
+ op0_mode);
}
#if defined(POINTERS_EXTEND_UNSIGNED)
@@ -2716,21 +2714,23 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
by simplify_shift_const. */
if (GET_CODE (opleft) == SUBREG
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
+ &inner_mode)
&& GET_CODE (SUBREG_REG (opleft)) == ASHIFT
&& GET_CODE (opright) == LSHIFTRT
&& GET_CODE (XEXP (opright, 0)) == SUBREG
- && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
&& SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
- && (GET_MODE_SIZE (GET_MODE (opleft))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
+ && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
&& CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
&& CONST_INT_P (XEXP (opright, 1))
- && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_PRECISION (mode)))
- return gen_rtx_ROTATE (mode, XEXP (opright, 0),
- XEXP (SUBREG_REG (opleft), 1));
+ && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
+ + INTVAL (XEXP (opright, 1))
+ == GET_MODE_PRECISION (int_mode)))
+ return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
+ XEXP (SUBREG_REG (opleft), 1));
/* If we have (ior (and (X C1) C2)), simplify this by making
C1 as small as possible if C1 actually changes. */
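
Two notes on the simplify-rtx.c hunk above. The dropped GET_MODE (opleft) ==
GET_MODE (XEXP (opright, 0)) test was redundant because both SUBREGs are
operands of operations in MODE, so they necessarily carry that mode already.
And the identity the ROTATE case relies on, that an IOR of an ASHIFT and an
LSHIFTRT whose shift counts sum to the precision is a rotate, can be checked
standalone (illustrative C++, not GCC code):

#include <cassert>
#include <cstdint>

int main ()
{
  const uint32_t x = 0x89abcdefu;

  /* (x << c) | (x >> (32 - c)) is a left rotate by c whenever the two
     shift counts sum to the precision.  c = 0 and c = 32 are skipped:
     shifting a 32-bit value by 32 is undefined in C++.  */
  for (unsigned c = 1; c < 32; ++c)
    {
      uint32_t ior_form = (x << c) | (x >> (32 - c));

      /* Reference rotate computed independently in 64 bits: the low
	 half of WIDE is x << c, the high half is the wrapped-around
	 x >> (32 - c).  */
      uint64_t wide = (uint64_t) x << c;
      uint32_t expect = (uint32_t) (wide | (wide >> 32));

      assert (ior_form == expect);
    }
  return 0;
}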