-rw-r--r--  gcc/ChangeLog                       16
-rw-r--r--  gcc/dumpfile.c                      25
-rw-r--r--  gcc/gimple-loop-interchange.cc       2
-rw-r--r--  gcc/graphite-isl-ast-to-gimple.c    11
-rw-r--r--  gcc/graphite-optimize-isl.c         36
-rw-r--r--  gcc/graphite.c                       3
-rw-r--r--  gcc/testsuite/ChangeLog             25
-rw-r--r--  gcc/tree-loop-distribution.c         9
-rw-r--r--  gcc/tree-parloops.c                 17
-rw-r--r--  gcc/tree-ssa-loop-niter.c            2
-rw-r--r--  gcc/tree-vect-data-refs.c           10
-rw-r--r--  gcc/tree-vect-loop.c                53
-rw-r--r--  gcc/tree-vect-slp.c                 84
-rw-r--r--  gcc/tree-vect-stmts.c                5
-rw-r--r--  gcc/tree-vectorizer.c               26
15 files changed, 218 insertions, 106 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ddc99e3102f..2389f1cda8f 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,21 @@
2018-11-13 David Malcolm <dmalcolm@redhat.com>
+ * dumpfile.c (VERIFY_DUMP_ENABLED_P): New macro.
+ (dump_gimple_stmt): Use it.
+ (dump_gimple_stmt_loc): Likewise.
+ (dump_gimple_expr): Likewise.
+ (dump_gimple_expr_loc): Likewise.
+ (dump_generic_expr): Likewise.
+ (dump_generic_expr_loc): Likewise.
+ (dump_printf): Likewise.
+ (dump_printf_loc): Likewise.
+ (dump_dec): Likewise.
+ (dump_dec): Likewise.
+ (dump_hex): Likewise.
+ (dump_symtab_node): Likewise.
+
+2018-11-13 David Malcolm <dmalcolm@redhat.com>
+
PR ipa/87955
* ipa-inline.c (report_inline_failed_reason): Guard calls to
cl_target_option_print_diff and cl_optimization_print_diff with
diff --git a/gcc/dumpfile.c b/gcc/dumpfile.c
index 09c24905f52..a1ab20531db 100644
--- a/gcc/dumpfile.c
+++ b/gcc/dumpfile.c
@@ -1184,6 +1184,19 @@ dump_context dump_context::s_default;
/* Implementation of dump_* API calls, calling into dump_context
member functions. */
+/* Calls to the dump_* functions do non-trivial work, so they ought
+ to be guarded by:
+ if (dump_enabled_p ())
+ Assert that they are guarded, and, if assertions are disabled,
+ bail out if the calls weren't properly guarded. */
+
+#define VERIFY_DUMP_ENABLED_P \
+ do { \
+ gcc_assert (dump_enabled_p ()); \
+ if (!dump_enabled_p ()) \
+ return; \
+ } while (0)
+
/* Dump gimple statement GS with SPC indentation spaces and
EXTRA_DUMP_FLAGS on the dump streams if DUMP_KIND is enabled. */
@@ -1191,6 +1204,7 @@ void
dump_gimple_stmt (dump_flags_t dump_kind, dump_flags_t extra_dump_flags,
gimple *gs, int spc)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_gimple_stmt (dump_kind, extra_dump_flags, gs, spc);
}
@@ -1200,6 +1214,7 @@ void
dump_gimple_stmt_loc (dump_flags_t dump_kind, const dump_location_t &loc,
dump_flags_t extra_dump_flags, gimple *gs, int spc)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_gimple_stmt_loc (dump_kind, loc, extra_dump_flags,
gs, spc);
}
@@ -1212,6 +1227,7 @@ void
dump_gimple_expr (dump_flags_t dump_kind, dump_flags_t extra_dump_flags,
gimple *gs, int spc)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_gimple_expr (dump_kind, extra_dump_flags, gs, spc);
}
@@ -1221,6 +1237,7 @@ void
dump_gimple_expr_loc (dump_flags_t dump_kind, const dump_location_t &loc,
dump_flags_t extra_dump_flags, gimple *gs, int spc)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_gimple_expr_loc (dump_kind, loc, extra_dump_flags,
gs, spc);
}
@@ -1232,6 +1249,7 @@ void
dump_generic_expr (dump_flags_t dump_kind, dump_flags_t extra_dump_flags,
tree t)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_generic_expr (dump_kind, extra_dump_flags, t);
}
@@ -1242,6 +1260,7 @@ void
dump_generic_expr_loc (dump_flags_t dump_kind, const dump_location_t &loc,
dump_flags_t extra_dump_flags, tree t)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_generic_expr_loc (dump_kind, loc, extra_dump_flags,
t);
}
@@ -1251,6 +1270,7 @@ dump_generic_expr_loc (dump_flags_t dump_kind, const dump_location_t &loc,
void
dump_printf (dump_flags_t dump_kind, const char *format, ...)
{
+ VERIFY_DUMP_ENABLED_P;
va_list ap;
va_start (ap, format);
dump_context::get ().dump_printf_va (dump_kind, format, &ap);
@@ -1264,6 +1284,7 @@ void
dump_printf_loc (dump_flags_t dump_kind, const dump_location_t &loc,
const char *format, ...)
{
+ VERIFY_DUMP_ENABLED_P;
va_list ap;
va_start (ap, format);
dump_context::get ().dump_printf_loc_va (dump_kind, loc, format, &ap);
@@ -1276,6 +1297,7 @@ template<unsigned int N, typename C>
void
dump_dec (dump_flags_t dump_kind, const poly_int<N, C> &value)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_dec (dump_kind, value);
}
@@ -1288,6 +1310,7 @@ template void dump_dec (dump_flags_t, const poly_widest_int &);
void
dump_dec (dump_flags_t dump_kind, const poly_wide_int &value, signop sgn)
{
+ VERIFY_DUMP_ENABLED_P;
if (dump_file
&& dump_context::get ().apply_dump_filter_p (dump_kind, pflags))
print_dec (value, dump_file, sgn);
@@ -1302,6 +1325,7 @@ dump_dec (dump_flags_t dump_kind, const poly_wide_int &value, signop sgn)
void
dump_hex (dump_flags_t dump_kind, const poly_wide_int &value)
{
+ VERIFY_DUMP_ENABLED_P;
if (dump_file
&& dump_context::get ().apply_dump_filter_p (dump_kind, pflags))
print_hex (value, dump_file);
@@ -1325,6 +1349,7 @@ dumpfile_ensure_any_optinfo_are_flushed ()
void
dump_symtab_node (dump_flags_t dump_kind, symtab_node *node)
{
+ VERIFY_DUMP_ENABLED_P;
dump_context::get ().dump_symtab_node (dump_kind, node);
}
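
The contract spelled out in the comment above — every dump_* call must sit under "if (dump_enabled_p ())" — is what the remaining hunks in this patch enforce at each call site. The standalone sketch below models that pattern outside of GCC: dump_enabled_p, dump_printf_loc, MSG_NOTE and the VERIFY_DUMP_ENABLED_P macro here are simplified stand-ins written for illustration, not the real dumpfile.c definitions.

/* Standalone model of the guard pattern (illustration only; these are
   stand-ins, not GCC's dumpfile.c internals).  */
#include <cassert>
#include <cstdarg>
#include <cstdio>

static bool dumps_enabled;        /* true when a dump/opt-info sink is active */
static bool dump_enabled_p () { return dumps_enabled; }

/* Mirror of VERIFY_DUMP_ENABLED_P: assert in checking builds; with
   assertions disabled, silently bail out of the unguarded call.  */
#define VERIFY_DUMP_ENABLED_P \
  do { \
    assert (dump_enabled_p ()); \
    if (!dump_enabled_p ()) \
      return; \
  } while (0)

enum dump_flags_t { MSG_NOTE };

static void
dump_printf_loc (dump_flags_t, const char *format, ...)
{
  VERIFY_DUMP_ENABLED_P;          /* catches callers that skipped the guard */
  va_list ap;
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
}

int
main ()
{
  dumps_enabled = true;
  /* The guarded call-site pattern applied throughout the rest of the patch.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, "loop %d vectorized\n", 1);
  return 0;
}

In checking builds the assert turns an unguarded call into a hard failure; in release builds the early return merely avoids doing the dump work when no dump sink is active.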
diff --git a/gcc/gimple-loop-interchange.cc b/gcc/gimple-loop-interchange.cc
index 08aeb8eba9d..9145b1217da 100644
--- a/gcc/gimple-loop-interchange.cc
+++ b/gcc/gimple-loop-interchange.cc
@@ -1645,7 +1645,7 @@ tree_loop_interchange::interchange (vec<data_reference_p> datarefs,
}
simple_dce_from_worklist (m_dce_seeds);
- if (changed_p)
+ if (changed_p && dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
"loops interchanged in loop nest\n");
diff --git a/gcc/graphite-isl-ast-to-gimple.c b/gcc/graphite-isl-ast-to-gimple.c
index 9e78465ea9c..0d8960c6bb2 100644
--- a/gcc/graphite-isl-ast-to-gimple.c
+++ b/gcc/graphite-isl-ast-to-gimple.c
@@ -1518,10 +1518,13 @@ graphite_regenerate_ast_isl (scop_p scop)
if (t.codegen_error_p ())
{
- dump_user_location_t loc = find_loop_location
- (scop->scop_info->region.entry->dest->loop_father);
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
- "loop nest not optimized, code generation error\n");
+ if (dump_enabled_p ())
+ {
+ dump_user_location_t loc = find_loop_location
+ (scop->scop_info->region.entry->dest->loop_father);
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
+ "loop nest not optimized, code generation error\n");
+ }
/* Remove the unreachable region. */
remove_edge_and_dominated_blocks (if_region->true_region->region.entry);
diff --git a/gcc/graphite-optimize-isl.c b/gcc/graphite-optimize-isl.c
index 35e9ac00ec2..8ceaa495a36 100644
--- a/gcc/graphite-optimize-isl.c
+++ b/gcc/graphite-optimize-isl.c
@@ -160,16 +160,19 @@ optimize_isl (scop_p scop)
if (!scop->transformed_schedule
|| isl_ctx_last_error (scop->isl_context) != isl_error_none)
{
- dump_user_location_t loc = find_loop_location
- (scop->scop_info->region.entry->dest->loop_father);
- if (isl_ctx_last_error (scop->isl_context) == isl_error_quota)
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
- "loop nest not optimized, optimization timed out "
- "after %d operations [--param max-isl-operations]\n",
- max_operations);
- else
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
- "loop nest not optimized, ISL signalled an error\n");
+ if (dump_enabled_p ())
+ {
+ dump_user_location_t loc = find_loop_location
+ (scop->scop_info->region.entry->dest->loop_father);
+ if (isl_ctx_last_error (scop->isl_context) == isl_error_quota)
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
+ "loop nest not optimized, optimization timed out "
+ "after %d operations [--param max-isl-operations]\n",
+ max_operations);
+ else
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
+ "loop nest not optimized, ISL signalled an error\n");
+ }
return false;
}
@@ -182,11 +185,14 @@ optimize_isl (scop_p scop)
if (same_schedule)
{
- dump_user_location_t loc = find_loop_location
- (scop->scop_info->region.entry->dest->loop_father);
- dump_printf_loc (MSG_NOTE, loc,
- "loop nest not optimized, optimized schedule is "
- "identical to original schedule\n");
+ if (dump_enabled_p ())
+ {
+ dump_user_location_t loc = find_loop_location
+ (scop->scop_info->region.entry->dest->loop_father);
+ dump_printf_loc (MSG_NOTE, loc,
+ "loop nest not optimized, optimized schedule is "
+ "identical to original schedule\n");
+ }
if (dump_file)
print_schedule_ast (dump_file, scop->original_schedule, scop);
isl_schedule_free (scop->transformed_schedule);
diff --git a/gcc/graphite.c b/gcc/graphite.c
index ddf16a827d0..f49eef606f6 100644
--- a/gcc/graphite.c
+++ b/gcc/graphite.c
@@ -410,7 +410,8 @@ graphite_transform_loops (void)
continue;
changed = true;
- if (graphite_regenerate_ast_isl (scop))
+ if (graphite_regenerate_ast_isl (scop)
+ && dump_enabled_p ())
{
dump_user_location_t loc = find_loop_location
(scops[i]->scop_info->region.entry->dest->loop_father);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index d2f0fd3adc5..9afb5f2702c 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,30 @@
2018-11-13 David Malcolm <dmalcolm@redhat.com>
+ * gimple-loop-interchange.cc (tree_loop_interchange::interchange):
+ Guard dump call with dump_enabled_p.
+ * graphite-isl-ast-to-gimple.c (graphite_regenerate_ast_isl): Likewise.
+ * graphite-optimize-isl.c (optimize_isl): Likewise.
+ * graphite.c (graphite_transform_loops): Likewise.
+ * tree-loop-distribution.c (pass_loop_distribution::execute): Likewise.
+ * tree-parloops.c (parallelize_loops): Likewise.
+ * tree-ssa-loop-niter.c (number_of_iterations_exit): Likewise.
+ * tree-vect-data-refs.c (vect_analyze_group_access_1): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ * tree-vect-loop.c (vect_update_vf_for_slp): Likewise.
+ (vect_estimate_min_profitable_iters): Likewise.
+ * tree-vect-slp.c (vect_record_max_nunits): Likewise.
+ (vect_build_slp_tree_2): Likewise.
+ (vect_supported_load_permutation_p): Likewise.
+ (vect_slp_analyze_operations): Likewise.
+ (vect_slp_analyze_bb_1): Likewise.
+ (vect_slp_bb): Likewise.
+ * tree-vect-stmts.c (vect_analyze_stmt): Likewise.
+ * tree-vectorizer.c (try_vectorize_loop_1): Likewise.
+ (pass_slp_vectorize::execute): Likewise.
+ (increase_alignment): Likewise.
+
+2018-11-13 David Malcolm <dmalcolm@redhat.com>
+
PR ipa/87955
* gcc.target/i386/pr87955.c: New test.
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 1e8a9f0991b..8f61a35e5b1 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -3139,10 +3139,11 @@ pass_loop_distribution::execute (function *fun)
if (nb_generated_loops + nb_generated_calls > 0)
{
changed = true;
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
- loc, "Loop%s %d distributed: split to %d loops "
- "and %d library calls.\n", str, loop->num,
- nb_generated_loops, nb_generated_calls);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
+ loc, "Loop%s %d distributed: split to %d loops "
+ "and %d library calls.\n", str, loop->num,
+ nb_generated_loops, nb_generated_calls);
break;
}
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 94824a0236f..4e22898268f 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -3409,13 +3409,16 @@ parallelize_loops (bool oacc_kernels_p)
changed = true;
skip_loop = loop->inner;
- dump_user_location_t loop_loc = find_loop_location (loop);
- if (loop->inner)
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
- "parallelizing outer loop %d\n", loop->num);
- else
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
- "parallelizing inner loop %d\n", loop->num);
+ if (dump_enabled_p ())
+ {
+ dump_user_location_t loop_loc = find_loop_location (loop);
+ if (loop->inner)
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
+ "parallelizing outer loop %d\n", loop->num);
+ else
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
+ "parallelizing inner loop %d\n", loop->num);
+ }
gen_parallel_loop (loop, &reduction_list,
n_threads, &niter_desc, oacc_kernels_p);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index e763b35ee84..9bcd66449fa 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -2630,7 +2630,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
if (integer_nonzerop (niter->assumptions))
return true;
- if (warn)
+ if (warn && dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
"missed loop optimization: niters analysis ends up "
"with assumptions.\n");
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 8d9acd84f09..1cc0320f00d 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2458,7 +2458,8 @@ vect_analyze_group_access_1 (dr_vec_info *dr_info)
return true;
}
- dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
STMT_VINFO_STRIDED_P (stmt_info) = true;
return true;
}
@@ -3558,9 +3559,10 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
unsigned int count = (comp_alias_ddrs.length ()
+ check_unequal_addrs.length ());
- dump_printf_loc (MSG_NOTE, vect_location,
- "improved number of alias checks from %d to %d\n",
- may_alias_ddrs.length (), count);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "improved number of alias checks from %d to %d\n",
+ may_alias_ddrs.length (), count);
if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
return opt_result::failure_at
(vect_location,
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 1a39b3bb4e9..5baf87b926c 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -1399,14 +1399,16 @@ vect_update_vf_for_slp (loop_vec_info loop_vinfo)
if (only_slp_in_loop)
{
- dump_printf_loc (MSG_NOTE, vect_location,
- "Loop contains only SLP stmts\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Loop contains only SLP stmts\n");
vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
}
else
{
- dump_printf_loc (MSG_NOTE, vect_location,
- "Loop contains SLP and non-SLP stmts\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Loop contains SLP and non-SLP stmts\n");
/* Both the vectorization factor and unroll factor have the form
current_vector_size * X for some rational X, so they must have
a common multiple. */
@@ -3337,7 +3339,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
/* Cost model disabled. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
- dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
*ret_min_profitable_niters = 0;
*ret_min_profitable_estimate = 0;
return;
@@ -3350,9 +3353,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
(void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
vect_prologue);
- dump_printf (MSG_NOTE,
- "cost model: Adding cost of checks for loop "
- "versioning to treat misalignment.\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ "cost model: Adding cost of checks for loop "
+ "versioning to treat misalignment.\n");
}
/* Requires loop versioning with alias checks. */
@@ -3379,9 +3383,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
(void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
NULL, 0, vect_prologue);
}
- dump_printf (MSG_NOTE,
- "cost model: Adding cost of checks for loop "
- "versioning aliasing.\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ "cost model: Adding cost of checks for loop "
+ "versioning aliasing.\n");
}
/* Requires loop versioning with niter checks. */
@@ -3390,9 +3395,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
/* FIXME: Make cost depend on complexity of individual check. */
(void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
vect_prologue);
- dump_printf (MSG_NOTE,
- "cost model: Adding cost of checks for loop "
- "versioning niters.\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ "cost model: Adding cost of checks for loop "
+ "versioning niters.\n");
}
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
@@ -3440,15 +3446,17 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
else if (npeel < 0)
{
peel_iters_prologue = assumed_vf / 2;
- dump_printf (MSG_NOTE, "cost model: "
- "prologue peel iters set to vf/2.\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "cost model: "
+ "prologue peel iters set to vf/2.\n");
/* If peeling for alignment is unknown, loop bound of main loop becomes
unknown. */
peel_iters_epilogue = assumed_vf / 2;
- dump_printf (MSG_NOTE, "cost model: "
- "epilogue peel iters set to vf/2 because "
- "peeling for alignment is unknown.\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "cost model: "
+ "epilogue peel iters set to vf/2 because "
+ "peeling for alignment is unknown.\n");
/* If peeled iterations are unknown, count a taken branch and a not taken
branch per peeled loop. Even if scalar loop iterations are known,
@@ -3653,9 +3661,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
return;
}
- dump_printf (MSG_NOTE,
- " Calculated minimum iters for profitability: %d\n",
- min_profitable_iters);
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE,
+ " Calculated minimum iters for profitability: %d\n",
+ min_profitable_iters);
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& min_profitable_iters < (assumed_vf + peel_iters_prologue))
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index f802b004bef..f2bb8da9de2 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -575,9 +575,10 @@ vect_record_max_nunits (stmt_vec_info stmt_info, unsigned int group_size,
&& (!nunits.is_constant (&const_nunits)
|| const_nunits > group_size))
{
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unrolling required "
- "in basic block SLP\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unrolling required "
+ "in basic block SLP\n");
/* Fatal mismatch. */
return false;
}
@@ -1231,9 +1232,10 @@ vect_build_slp_tree_2 (vec_info *vinfo,
vect_free_slp_tree (grandchild, false);
SLP_TREE_CHILDREN (child).truncate (0);
- dump_printf_loc (MSG_NOTE, vect_location,
- "Building parent vector operands from "
- "scalars instead\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Building parent vector operands from "
+ "scalars instead\n");
oprnd_info->def_stmts = vNULL;
SLP_TREE_DEF_TYPE (child) = vect_external_def;
children.safe_push (child);
@@ -1261,8 +1263,9 @@ vect_build_slp_tree_2 (vec_info *vinfo,
scalar version. */
&& !is_pattern_stmt_p (stmt_info))
{
- dump_printf_loc (MSG_NOTE, vect_location,
- "Building vector operands from scalars\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Building vector operands from scalars\n");
child = vect_create_new_slp_node (oprnd_info->def_stmts);
SLP_TREE_DEF_TYPE (child) = vect_external_def;
children.safe_push (child);
@@ -1334,16 +1337,19 @@ vect_build_slp_tree_2 (vec_info *vinfo,
while (j != group_size);
/* Swap mismatched definition stmts. */
- dump_printf_loc (MSG_NOTE, vect_location,
- "Re-trying with swapped operands of stmts ");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Re-trying with swapped operands of stmts ");
for (j = 0; j < group_size; ++j)
if (matches[j] == !swap_not_matching)
{
std::swap (oprnds_info[0]->def_stmts[j],
oprnds_info[1]->def_stmts[j]);
- dump_printf (MSG_NOTE, "%d ", j);
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "%d ", j);
}
- dump_printf (MSG_NOTE, "\n");
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "\n");
/* And try again with scratch 'matches' ... */
bool *tem = XALLOCAVEC (bool, group_size);
if ((child = vect_build_slp_tree (vinfo, oprnd_info->def_stmts,
@@ -1399,9 +1405,10 @@ vect_build_slp_tree_2 (vec_info *vinfo,
vect_free_slp_tree (grandchild, false);
SLP_TREE_CHILDREN (child).truncate (0);
- dump_printf_loc (MSG_NOTE, vect_location,
- "Building parent vector operands from "
- "scalars instead\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Building parent vector operands from "
+ "scalars instead\n");
oprnd_info->def_stmts = vNULL;
SLP_TREE_DEF_TYPE (child) = vect_external_def;
children.safe_push (child);
@@ -1757,9 +1764,10 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
|| maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
{
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "BB vectorization with gaps at the end of "
- "a load is not supported\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "BB vectorization with gaps at the end of "
+ "a load is not supported\n");
return false;
}
@@ -1769,9 +1777,10 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
if (!vect_transform_slp_perm_load (node, tem, NULL,
1, slp_instn, true, &n_perms))
{
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "unsupported load permutation\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "unsupported load permutation\n");
return false;
}
}
@@ -2592,9 +2601,10 @@ vect_slp_analyze_operations (vec_info *vinfo)
{
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
- dump_printf_loc (MSG_NOTE, vect_location,
- "removing SLP instance operations starting from: %G",
- stmt_info->stmt);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "removing SLP instance operations starting from: %G",
+ stmt_info->stmt);
vect_free_slp_instance (instance, false);
vinfo->slp_instances.ordered_remove (i);
cost_vec.release ();
@@ -2888,9 +2898,10 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
{
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
- dump_printf_loc (MSG_NOTE, vect_location,
- "removing SLP instance operations starting from: %G",
- stmt_info->stmt);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "removing SLP instance operations starting from: %G",
+ stmt_info->stmt);
vect_free_slp_instance (instance, false);
BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
continue;
@@ -3006,14 +3017,17 @@ vect_slp_bb (basic_block bb)
vect_schedule_slp (bb_vinfo);
unsigned HOST_WIDE_INT bytes;
- if (current_vector_size.is_constant (&bytes))
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "basic block part vectorized using %wu byte "
- "vectors\n", bytes);
- else
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "basic block part vectorized using variable "
- "length vectors\n");
+ if (dump_enabled_p ())
+ {
+ if (current_vector_size.is_constant (&bytes))
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "basic block part vectorized using %wu byte "
+ "vectors\n", bytes);
+ else
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "basic block part vectorized using variable "
+ "length vectors\n");
+ }
vectorized = true;
}
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 51088cb0ba9..74646570e2a 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -9530,8 +9530,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
if (PURE_SLP_STMT (stmt_info) && !node)
{
- dump_printf_loc (MSG_NOTE, vect_location,
- "handled only by SLP analysis\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "handled only by SLP analysis\n");
return opt_result::success ();
}
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index 12bf0fcd5bd..0a4eca51ad7 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -925,8 +925,9 @@ try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
}
if (!require_loop_vectorize && vect_slp_bb (bb))
{
- dump_printf_loc (MSG_NOTE, vect_location,
- "basic block vectorized\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "basic block vectorized\n");
fold_loop_internal_call (loop_vectorized_call,
boolean_true_node);
loop_vectorized_call = NULL;
@@ -955,12 +956,15 @@ try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
unsigned HOST_WIDE_INT bytes;
- if (current_vector_size.is_constant (&bytes))
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "loop vectorized using %wu byte vectors\n", bytes);
- else
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "loop vectorized using variable length vectors\n");
+ if (dump_enabled_p ())
+ {
+ if (current_vector_size.is_constant (&bytes))
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop vectorized using %wu byte vectors\n", bytes);
+ else
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop vectorized using variable length vectors\n");
+ }
loop_p new_loop = vect_transform_loop (loop_vinfo);
(*num_vectorized_loops)++;
@@ -1289,7 +1293,8 @@ pass_slp_vectorize::execute (function *fun)
FOR_EACH_BB_FN (bb, fun)
{
if (vect_slp_bb (bb))
- dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
}
if (!in_loop_pipeline)
@@ -1447,7 +1452,8 @@ increase_alignment (void)
if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
{
vnode->increase_alignment (alignment);
- dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
+ if (dump_enabled_p ())
+ dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
}
}