path: root/gcc/haifa-sched.c
author    Vlad Lazar <vlad.lazar@arm.com>    2018-09-13 13:43:48 +0000
committer Vlad Lazar <vladlazar@gcc.gnu.org> 2018-09-13 13:43:48 +0000
commit    d8d9514c872857d35c69331bf672b8eb94939e21 (patch)
tree      4346e9e7761c4a0faba4c250647cfcca10ebf403 /gcc/haifa-sched.c
parent    2aa4cc744ff8ebfaaec69edf8d21411cecc0b783 (diff)
Schedule by INSN_COST in case of tie
2018-09-13  Vlad Lazar  <vlad.lazar@arm.com>

	* haifa-sched.c (rank_for_schedule): Schedule by INSN_COST.
	(rfs_decision): New scheduling decision.

From-SVN: r264270
Diffstat (limited to 'gcc/haifa-sched.c')
-rw-r--r--  gcc/haifa-sched.c  12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 4f0221f6f43..1fdc9df9fb2 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -2542,7 +2542,7 @@ enum rfs_decision {
RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
- RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
+ RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
/* Corresponding strings for print outs. */
static const char *rfs_str[RFS_N] = {
@@ -2550,7 +2550,7 @@ static const char *rfs_str[RFS_N] = {
"RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
"RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
"RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
- "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
+ "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
/* Statistical breakdown of rank_for_schedule decisions. */
struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
@@ -2803,6 +2803,14 @@ rank_for_schedule (const void *x, const void *y)
if (flag_sched_dep_count_heuristic && val != 0)
return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
+ /* Sort by INSN_COST rather than INSN_LUID. This means that instructions
+ which take longer to execute are prioritised and it leads to more
+ dual-issue opportunities on in-order cores which have this feature. */
+
+ if (INSN_COST (tmp) != INSN_COST (tmp2))
+ return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
+ tmp, tmp2);
+
/* If insns are equally good, sort by INSN_LUID (original insn order),
so that we make the sort stable. This minimizes instruction movement,
thus minimizing sched's effect on debugging and cross-jumping. */
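Below is a minimal standalone sketch (not GCC code) of the ordering this patch introduces: when every earlier rank_for_schedule heuristic ties, the insn with the higher INSN_COST is preferred, and only then does the comparison fall back to original insn order (INSN_LUID). The struct insn_info and its luid/cost fields are hypothetical stand-ins used purely for illustration.

/* Standalone illustration of the cost-then-LUID tie-break added by this
   patch.  Not GCC code: insn_info, luid and cost stand in for real insns,
   INSN_LUID and INSN_COST.  */
#include <stdio.h>
#include <stdlib.h>

struct insn_info
{
  int luid;	/* Original insn order (stand-in for INSN_LUID).  */
  int cost;	/* Latency estimate (stand-in for INSN_COST).  */
};

/* qsort comparator: higher-cost insns sort first, using the same
   expression shape as the patch (INSN_COST (tmp2) - INSN_COST (tmp));
   equal costs fall back to LUID so the result is deterministic even
   though qsort itself is not stable.  */
static int
rank_by_cost_then_luid (const void *x, const void *y)
{
  const struct insn_info *a = (const struct insn_info *) x;
  const struct insn_info *b = (const struct insn_info *) y;

  if (a->cost != b->cost)
    return b->cost - a->cost;
  return a->luid - b->luid;
}

int
main (void)
{
  struct insn_info ready[] = { { 1, 1 }, { 2, 4 }, { 3, 1 }, { 4, 2 } };
  size_t n = sizeof ready / sizeof ready[0];
  size_t i;

  qsort (ready, n, sizeof ready[0], rank_by_cost_then_luid);

  for (i = 0; i < n; i++)
    printf ("luid %d (cost %d)\n", ready[i].luid, ready[i].cost);
  return 0;
}

Sorting the toy ready list yields luid 2 (cost 4), luid 4 (cost 2), luid 1 (cost 1), luid 3 (cost 1): longer-latency insns are pulled forward, which, per the comment in the patch, gives in-order dual-issue cores more opportunities to fill the second issue slot.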