author    Richard Sandiford <richard.sandiford@linaro.org>  2017-10-10 16:55:34 +0000
committer Richard Sandiford <rsandifo@gcc.gnu.org>          2017-10-10 16:55:34 +0000
commit    8e6cdc90d41633e09a3a34bb8c6f71cf246101b2 (patch)
tree      c8d6aac483b647ae892778e9398965071da02686 /gcc
parent    3934b625ee2f522edf455191d1eaaa42e265a1d9 (diff)
Require wi::to_wide for trees
The wide_int routines allow things like:

  wi::add (t, 1)

to add 1 to an INTEGER_CST T in its native precision.  But we also have:

  wi::to_offset (t)  // Treat T as an offset_int
  wi::to_widest (t)  // Treat T as a widest_int

Recently we also gained:

  wi::to_wide (t, prec)  // Treat T as a wide_int in precision PREC

This patch therefore requires:

  wi::to_wide (t)

when operating on INTEGER_CSTs in their native precision.  This is
just as efficient, and makes it clearer that a deliberate choice is
being made to treat the tree as a wide_int in its native precision.

This also removes the inconsistency that

a) INTEGER_CSTs in their native precision can be used without an
   accessor but must use wi:: functions instead of C++ operators

b) the other forms need an explicit accessor but the result can be
   used with C++ operators.

It also helps with SVE, where there's the additional possibility
that the tree could be a runtime value.

2017-10-10  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
        * wide-int.h (wide_int_ref_storage): Make host_dependent_precision
        a template parameter.
        (WIDE_INT_REF_FOR): Update accordingly.
        * tree.h (wi::int_traits <const_tree>): Delete.
        (wi::tree_to_widest_ref, wi::tree_to_offset_ref): New typedefs.
        (wi::to_widest, wi::to_offset): Use them.  Expand commentary.
        (wi::tree_to_wide_ref): New typedef.
        (wi::to_wide): New function.
        * calls.c (get_size_range): Use wi::to_wide when operating on
        trees as wide_ints.
        * cgraph.c (cgraph_node::create_thunk): Likewise.
        * config/i386/i386.c (ix86_data_alignment): Likewise.
        (ix86_local_alignment): Likewise.
        * dbxout.c (stabstr_O): Likewise.
        * dwarf2out.c (add_scalar_info, gen_enumeration_type_die): Likewise.
        * expr.c (const_vector_from_tree): Likewise.
        * fold-const-call.c (host_size_t_cst_p, fold_const_call_1): Likewise.
        * fold-const.c (may_negate_without_overflow_p, negate_expr_p)
        (fold_negate_expr_1, int_const_binop_1, const_binop)
        (fold_convert_const_int_from_real, optimize_bit_field_compare)
        (all_ones_mask_p, sign_bit_p, unextend, extract_muldiv_1)
        (fold_div_compare, fold_single_bit_test, fold_plusminus_mult_expr)
        (pointer_may_wrap_p, expr_not_equal_to, fold_binary_loc)
        (fold_ternary_loc, multiple_of_p, fold_negate_const, fold_abs_const)
        (fold_not_const, round_up_loc): Likewise.
        * gimple-fold.c (gimple_fold_indirect_ref): Likewise.
        * gimple-ssa-warn-alloca.c (alloca_call_type_by_arg): Likewise.
        (alloca_call_type): Likewise.
        * gimple.c (preprocess_case_label_vec_for_gimple): Likewise.
        * godump.c (go_output_typedef): Likewise.
        * graphite-sese-to-poly.c (tree_int_to_gmp): Likewise.
        * internal-fn.c (get_min_precision): Likewise.
        * ipa-cp.c (ipcp_store_vr_results): Likewise.
        * ipa-polymorphic-call.c
        (ipa_polymorphic_call_context::ipa_polymorphic_call_context): Likewise.
        * ipa-prop.c (ipa_print_node_jump_functions_for_edge): Likewise.
        (ipa_modify_call_arguments): Likewise.
        * match.pd: Likewise.
        * omp-low.c (scan_omp_1_op, lower_omp_ordered_clauses): Likewise.
        * print-tree.c (print_node_brief, print_node): Likewise.
        * stmt.c (expand_case): Likewise.
        * stor-layout.c (layout_type): Likewise.
        * tree-affine.c (tree_to_aff_combination): Likewise.
        * tree-cfg.c (group_case_labels_stmt): Likewise.
        * tree-data-ref.c (dr_analyze_indices): Likewise.
        (prune_runtime_alias_test_list): Likewise.
        * tree-dump.c (dequeue_and_dump): Likewise.
        * tree-inline.c (remap_gimple_op_r, copy_tree_body_r): Likewise.
        * tree-predcom.c (is_inv_store_elimination_chain): Likewise.
        * tree-pretty-print.c (dump_generic_node): Likewise.
        * tree-scalar-evolution.c (iv_can_overflow_p): Likewise.
        (simple_iv_with_niters): Likewise.
        * tree-ssa-address.c (addr_for_mem_ref): Likewise.
        * tree-ssa-ccp.c (ccp_finalize, evaluate_stmt): Likewise.
        * tree-ssa-loop-ivopts.c (constant_multiple_of): Likewise.
        * tree-ssa-loop-niter.c (split_to_var_and_offset)
        (refine_value_range_using_guard, number_of_iterations_ne_max)
        (number_of_iterations_lt_to_ne, number_of_iterations_lt)
        (get_cst_init_from_scev, record_nonwrapping_iv)
        (scev_var_range_cant_overflow): Likewise.
        * tree-ssa-phiopt.c (minmax_replacement): Likewise.
        * tree-ssa-pre.c (compute_avail): Likewise.
        * tree-ssa-sccvn.c (vn_reference_fold_indirect): Likewise.
        (vn_reference_maybe_forwprop_address, valueized_wider_op): Likewise.
        * tree-ssa-structalias.c (get_constraint_for_ptr_offset): Likewise.
        * tree-ssa-uninit.c (is_pred_expr_subset_of): Likewise.
        * tree-ssanames.c (set_nonzero_bits, get_nonzero_bits): Likewise.
        * tree-switch-conversion.c (collect_switch_conv_info, array_value_type)
        (dump_case_nodes, try_switch_expansion): Likewise.
        * tree-vect-loop-manip.c (vect_gen_vector_loop_niters): Likewise.
        (vect_do_peeling): Likewise.
        * tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
        * tree-vect-stmts.c (vectorizable_load): Likewise.
        * tree-vrp.c (compare_values_warnv, vrp_int_const_binop): Likewise.
        (zero_nonzero_bits_from_vr, ranges_from_anti_range): Likewise.
        (extract_range_from_binary_expr_1, adjust_range_with_scev): Likewise.
        (overflow_comparison_p_1, register_edge_assert_for_2): Likewise.
        (is_masked_range_test, find_switch_asserts, maybe_set_nonzero_bits)
        (vrp_evaluate_conditional_warnv_with_ops, intersect_ranges): Likewise.
        (range_fits_type_p, two_valued_val_range_p, vrp_finalize): Likewise.
        (evrp_dom_walker::before_dom_children): Likewise.
        * tree.c (cache_integer_cst, real_value_from_int_cst, integer_zerop)
        (integer_all_onesp, integer_pow2p, integer_nonzerop, tree_log2)
        (tree_floor_log2, tree_ctz, mem_ref_offset, tree_int_cst_sign_bit)
        (tree_int_cst_sgn, get_unwidened, int_fits_type_p): Likewise.
        (get_type_static_bounds, num_ending_zeros, drop_tree_overflow)
        (get_range_pos_neg): Likewise.
        * ubsan.c (ubsan_expand_ptr_ifn): Likewise.
        * config/darwin.c (darwin_mergeable_constant_section): Likewise.
        * config/aarch64/aarch64.c (aapcs_vfp_sub_candidate): Likewise.
        * config/arm/arm.c (aapcs_vfp_sub_candidate): Likewise.
        * config/avr/avr.c (avr_fold_builtin): Likewise.
        * config/bfin/bfin.c (bfin_local_alignment): Likewise.
        * config/msp430/msp430.c (msp430_attr): Likewise.
        * config/nds32/nds32.c (nds32_insert_attributes): Likewise.
        * config/powerpcspe/powerpcspe-c.c
        (altivec_resolve_overloaded_builtin): Likewise.
        * config/powerpcspe/powerpcspe.c (rs6000_aggregate_candidate)
        (rs6000_expand_ternop_builtin): Likewise.
        * config/rs6000/rs6000-c.c
        (altivec_resolve_overloaded_builtin): Likewise.
        * config/rs6000/rs6000.c (rs6000_aggregate_candidate): Likewise.
        (rs6000_expand_ternop_builtin): Likewise.
        * config/s390/s390.c (s390_handle_hotpatch_attribute): Likewise.

gcc/ada/
        * gcc-interface/decl.c (annotate_value): Use wi::to_wide when
        operating on trees as wide_ints.

gcc/c/
        * c-parser.c (c_parser_cilk_clause_vectorlength): Use wi::to_wide when
        operating on trees as wide_ints.
        * c-typeck.c (build_c_cast, c_finish_omp_clauses): Likewise.
        (c_tree_equal): Likewise.

gcc/c-family/
        * c-ada-spec.c (dump_generic_ada_node): Use wi::to_wide when
        operating on trees as wide_ints.
        * c-common.c (pointer_int_sum): Likewise.
        * c-pretty-print.c (pp_c_integer_constant): Likewise.
        * c-warn.c (match_case_to_enum_1): Likewise.
        (c_do_switch_warnings): Likewise.
        (maybe_warn_shift_overflow): Likewise.

gcc/cp/
        * cvt.c (ignore_overflows): Use wi::to_wide when
        operating on trees as wide_ints.
        * decl.c (check_array_designated_initializer): Likewise.
        * mangle.c (write_integer_cst): Likewise.
        * semantics.c (cp_finish_omp_clause_depend_sink): Likewise.

gcc/fortran/
        * target-memory.c (gfc_interpret_logical): Use wi::to_wide when
        operating on trees as wide_ints.
        * trans-const.c (gfc_conv_tree_to_mpz): Likewise.
        * trans-expr.c (gfc_conv_cst_int_power): Likewise.
        * trans-intrinsic.c (trans_this_image): Likewise.
        (gfc_conv_intrinsic_bound): Likewise.
        (conv_intrinsic_cobound): Likewise.

gcc/lto/
        * lto.c (compare_tree_sccs_1): Use wi::to_wide when operating on
        trees as wide_ints.

gcc/objc/
        * objc-act.c (objc_decl_method_attributes): Use wi::to_wide when
        operating on trees as wide_ints.

From-SVN: r253595
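For illustration only (not part of the commit), here is a minimal sketch
contrasting the old and new idioms, assuming GCC's internal tree.h and
wide-int.h API; the function name below is hypothetical:

/* Illustrative sketch, not from the patch.  T is an INTEGER_CST.
   Assumes GCC's usual internal includes (config.h, system.h,
   coretypes.h, tree.h) have been pulled in.  */

static wide_int
add_one_to_cst (tree t)
{
  /* Before this patch, T could be used directly in its native
     precision, but only through wi:: functions, never C++ operators:

       wide_int res = wi::add (t, 1);   // no longer accepted  */

  /* After this patch, the accessor is explicit, and the result
     supports C++ operators, matching the other accessors:  */
  wide_int res = wi::to_wide (t) + 1;

  /* The pre-existing accessors are unchanged:

       offset_int off = wi::to_offset (t) + 1;  // offset_int precision
       widest_int wst = wi::to_widest (t) + 1;  // widest_int precision
       wide_int   w   = wi::to_wide (t, prec);  // explicit precision PREC  */
  return res;
}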
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog | 107
-rw-r--r--  gcc/ada/ChangeLog | 5
-rw-r--r--  gcc/ada/gcc-interface/decl.c | 5
-rw-r--r--  gcc/c-family/ChangeLog | 10
-rw-r--r--  gcc/c-family/c-ada-spec.c | 2
-rw-r--r--  gcc/c-family/c-common.c | 2
-rw-r--r--  gcc/c-family/c-pretty-print.c | 4
-rw-r--r--  gcc/c-family/c-warn.c | 16
-rw-r--r--  gcc/c/ChangeLog | 7
-rw-r--r--  gcc/c/c-parser.c | 2
-rw-r--r--  gcc/c/c-typeck.c | 6
-rw-r--r--  gcc/calls.c | 20
-rw-r--r--  gcc/cgraph.c | 2
-rw-r--r--  gcc/config/aarch64/aarch64.c | 9
-rw-r--r--  gcc/config/arm/arm.c | 9
-rw-r--r--  gcc/config/avr/avr.c | 2
-rw-r--r--  gcc/config/bfin/bfin.c | 2
-rw-r--r--  gcc/config/darwin.c | 6
-rw-r--r--  gcc/config/i386/i386.c | 12
-rw-r--r--  gcc/config/msp430/msp430.c | 2
-rw-r--r--  gcc/config/nds32/nds32.c | 8
-rw-r--r--  gcc/config/powerpcspe/powerpcspe-c.c | 7
-rw-r--r--  gcc/config/powerpcspe/powerpcspe.c | 14
-rw-r--r--  gcc/config/rs6000/rs6000-c.c | 7
-rw-r--r--  gcc/config/rs6000/rs6000.c | 14
-rw-r--r--  gcc/config/s390/s390.c | 4
-rw-r--r--  gcc/cp/ChangeLog | 8
-rw-r--r--  gcc/cp/cvt.c | 2
-rw-r--r--  gcc/cp/decl.c | 2
-rw-r--r--  gcc/cp/mangle.c | 2
-rw-r--r--  gcc/cp/semantics.c | 2
-rw-r--r--  gcc/dbxout.c | 8
-rw-r--r--  gcc/dwarf2out.c | 4
-rw-r--r--  gcc/expr.c | 2
-rw-r--r--  gcc/fold-const-call.c | 12
-rw-r--r--  gcc/fold-const.c | 123
-rw-r--r--  gcc/fortran/ChangeLog | 10
-rw-r--r--  gcc/fortran/target-memory.c | 2
-rw-r--r--  gcc/fortran/trans-const.c | 2
-rw-r--r--  gcc/fortran/trans-expr.c | 2
-rw-r--r--  gcc/fortran/trans-intrinsic.c | 15
-rw-r--r--  gcc/gimple-fold.c | 2
-rw-r--r--  gcc/gimple-ssa-warn-alloca.c | 6
-rw-r--r--  gcc/gimple.c | 11
-rw-r--r--  gcc/godump.c | 2
-rw-r--r--  gcc/graphite-sese-to-poly.c | 2
-rw-r--r--  gcc/internal-fn.c | 2
-rw-r--r--  gcc/ipa-cp.c | 4
-rw-r--r--  gcc/ipa-polymorphic-call.c | 5
-rw-r--r--  gcc/ipa-prop.c | 7
-rw-r--r--  gcc/lto/ChangeLog | 5
-rw-r--r--  gcc/lto/lto.c | 2
-rw-r--r--  gcc/match.pd | 185
-rw-r--r--  gcc/objc/ChangeLog | 5
-rw-r--r--  gcc/objc/objc-act.c | 12
-rw-r--r--  gcc/omp-low.c | 10
-rw-r--r--  gcc/print-tree.c | 4
-rw-r--r--  gcc/stmt.c | 4
-rw-r--r--  gcc/stor-layout.c | 6
-rw-r--r--  gcc/tree-affine.c | 4
-rw-r--r--  gcc/tree-cfg.c | 4
-rw-r--r--  gcc/tree-data-ref.c | 41
-rw-r--r--  gcc/tree-dump.c | 2
-rw-r--r--  gcc/tree-inline.c | 4
-rw-r--r--  gcc/tree-predcom.c | 3
-rw-r--r--  gcc/tree-pretty-print.c | 2
-rw-r--r--  gcc/tree-scalar-evolution.c | 7
-rw-r--r--  gcc/tree-ssa-address.c | 4
-rw-r--r--  gcc/tree-ssa-ccp.c | 10
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c | 4
-rw-r--r--  gcc/tree-ssa-loop-niter.c | 44
-rw-r--r--  gcc/tree-ssa-phiopt.c | 12
-rw-r--r--  gcc/tree-ssa-pre.c | 20
-rw-r--r--  gcc/tree-ssa-sccvn.c | 11
-rw-r--r--  gcc/tree-ssa-structalias.c | 2
-rw-r--r--  gcc/tree-ssa-uninit.c | 4
-rw-r--r--  gcc/tree-ssanames.c | 6
-rw-r--r--  gcc/tree-switch-conversion.c | 14
-rw-r--r--  gcc/tree-vect-loop-manip.c | 11
-rw-r--r--  gcc/tree-vect-patterns.c | 2
-rw-r--r--  gcc/tree-vect-stmts.c | 16
-rw-r--r--  gcc/tree-vrp.c | 220
-rw-r--r--  gcc/tree.c | 57
-rw-r--r--  gcc/tree.h | 129
-rw-r--r--  gcc/ubsan.c | 4
-rw-r--r--  gcc/wide-int.h | 64
86 files changed, 896 insertions, 559 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ed487273fb9..351b1f01379 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,110 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * wide-int.h (wide_int_ref_storage): Make host_dependent_precision
+ a template parameter.
+ (WIDE_INT_REF_FOR): Update accordingly.
+ * tree.h (wi::int_traits <const_tree>): Delete.
+ (wi::tree_to_widest_ref, wi::tree_to_offset_ref): New typedefs.
+ (wi::to_widest, wi::to_offset): Use them. Expand commentary.
+ (wi::tree_to_wide_ref): New typedef.
+ (wi::to_wide): New function.
+ * calls.c (get_size_range): Use wi::to_wide when operating on
+ trees as wide_ints.
+ * cgraph.c (cgraph_node::create_thunk): Likewise.
+ * config/i386/i386.c (ix86_data_alignment): Likewise.
+ (ix86_local_alignment): Likewise.
+ * dbxout.c (stabstr_O): Likewise.
+ * dwarf2out.c (add_scalar_info, gen_enumeration_type_die): Likewise.
+ * expr.c (const_vector_from_tree): Likewise.
+ * fold-const-call.c (host_size_t_cst_p, fold_const_call_1): Likewise.
+ * fold-const.c (may_negate_without_overflow_p, negate_expr_p)
+ (fold_negate_expr_1, int_const_binop_1, const_binop)
+ (fold_convert_const_int_from_real, optimize_bit_field_compare)
+ (all_ones_mask_p, sign_bit_p, unextend, extract_muldiv_1)
+ (fold_div_compare, fold_single_bit_test, fold_plusminus_mult_expr)
+ (pointer_may_wrap_p, expr_not_equal_to, fold_binary_loc)
+ (fold_ternary_loc, multiple_of_p, fold_negate_const, fold_abs_const)
+ (fold_not_const, round_up_loc): Likewise.
+ * gimple-fold.c (gimple_fold_indirect_ref): Likewise.
+ * gimple-ssa-warn-alloca.c (alloca_call_type_by_arg): Likewise.
+ (alloca_call_type): Likewise.
+ * gimple.c (preprocess_case_label_vec_for_gimple): Likewise.
+ * godump.c (go_output_typedef): Likewise.
+ * graphite-sese-to-poly.c (tree_int_to_gmp): Likewise.
+ * internal-fn.c (get_min_precision): Likewise.
+ * ipa-cp.c (ipcp_store_vr_results): Likewise.
+ * ipa-polymorphic-call.c
+ (ipa_polymorphic_call_context::ipa_polymorphic_call_context): Likewise.
+ * ipa-prop.c (ipa_print_node_jump_functions_for_edge): Likewise.
+ (ipa_modify_call_arguments): Likewise.
+ * match.pd: Likewise.
+ * omp-low.c (scan_omp_1_op, lower_omp_ordered_clauses): Likewise.
+ * print-tree.c (print_node_brief, print_node): Likewise.
+ * stmt.c (expand_case): Likewise.
+ * stor-layout.c (layout_type): Likewise.
+ * tree-affine.c (tree_to_aff_combination): Likewise.
+ * tree-cfg.c (group_case_labels_stmt): Likewise.
+ * tree-data-ref.c (dr_analyze_indices): Likewise.
+ (prune_runtime_alias_test_list): Likewise.
+ * tree-dump.c (dequeue_and_dump): Likewise.
+ * tree-inline.c (remap_gimple_op_r, copy_tree_body_r): Likewise.
+ * tree-predcom.c (is_inv_store_elimination_chain): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ * tree-scalar-evolution.c (iv_can_overflow_p): Likewise.
+ (simple_iv_with_niters): Likewise.
+ * tree-ssa-address.c (addr_for_mem_ref): Likewise.
+ * tree-ssa-ccp.c (ccp_finalize, evaluate_stmt): Likewise.
+ * tree-ssa-loop-ivopts.c (constant_multiple_of): Likewise.
+ * tree-ssa-loop-niter.c (split_to_var_and_offset)
+ (refine_value_range_using_guard, number_of_iterations_ne_max)
+ (number_of_iterations_lt_to_ne, number_of_iterations_lt)
+ (get_cst_init_from_scev, record_nonwrapping_iv)
+ (scev_var_range_cant_overflow): Likewise.
+ * tree-ssa-phiopt.c (minmax_replacement): Likewise.
+ * tree-ssa-pre.c (compute_avail): Likewise.
+ * tree-ssa-sccvn.c (vn_reference_fold_indirect): Likewise.
+ (vn_reference_maybe_forwprop_address, valueized_wider_op): Likewise.
+ * tree-ssa-structalias.c (get_constraint_for_ptr_offset): Likewise.
+ * tree-ssa-uninit.c (is_pred_expr_subset_of): Likewise.
+ * tree-ssanames.c (set_nonzero_bits, get_nonzero_bits): Likewise.
+ * tree-switch-conversion.c (collect_switch_conv_info, array_value_type)
+ (dump_case_nodes, try_switch_expansion): Likewise.
+ * tree-vect-loop-manip.c (vect_gen_vector_loop_niters): Likewise.
+ (vect_do_peeling): Likewise.
+ * tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
+ * tree-vect-stmts.c (vectorizable_load): Likewise.
+ * tree-vrp.c (compare_values_warnv, vrp_int_const_binop): Likewise.
+ (zero_nonzero_bits_from_vr, ranges_from_anti_range): Likewise.
+ (extract_range_from_binary_expr_1, adjust_range_with_scev): Likewise.
+ (overflow_comparison_p_1, register_edge_assert_for_2): Likewise.
+ (is_masked_range_test, find_switch_asserts, maybe_set_nonzero_bits)
+ (vrp_evaluate_conditional_warnv_with_ops, intersect_ranges): Likewise.
+ (range_fits_type_p, two_valued_val_range_p, vrp_finalize): Likewise.
+ (evrp_dom_walker::before_dom_children): Likewise.
+ * tree.c (cache_integer_cst, real_value_from_int_cst, integer_zerop)
+ (integer_all_onesp, integer_pow2p, integer_nonzerop, tree_log2)
+ (tree_floor_log2, tree_ctz, mem_ref_offset, tree_int_cst_sign_bit)
+ (tree_int_cst_sgn, get_unwidened, int_fits_type_p): Likewise.
+ (get_type_static_bounds, num_ending_zeros, drop_tree_overflow)
+ (get_range_pos_neg): Likewise.
+ * ubsan.c (ubsan_expand_ptr_ifn): Likewise.
+ * config/darwin.c (darwin_mergeable_constant_section): Likewise.
+ * config/aarch64/aarch64.c (aapcs_vfp_sub_candidate): Likewise.
+ * config/arm/arm.c (aapcs_vfp_sub_candidate): Likewise.
+ * config/avr/avr.c (avr_fold_builtin): Likewise.
+ * config/bfin/bfin.c (bfin_local_alignment): Likewise.
+ * config/msp430/msp430.c (msp430_attr): Likewise.
+ * config/nds32/nds32.c (nds32_insert_attributes): Likewise.
+ * config/powerpcspe/powerpcspe-c.c
+ (altivec_resolve_overloaded_builtin): Likewise.
+ * config/powerpcspe/powerpcspe.c (rs6000_aggregate_candidate)
+ (rs6000_expand_ternop_builtin): Likewise.
+ * config/rs6000/rs6000-c.c
+ (altivec_resolve_overloaded_builtin): Likewise.
+ * config/rs6000/rs6000.c (rs6000_aggregate_candidate): Likewise.
+ (rs6000_expand_ternop_builtin): Likewise.
+ * config/s390/s390.c (s390_handle_hotpatch_attribute): Likewise.
+
2017-10-10 Bin Cheng <bin.cheng@arm.com>
* tree-vect-loop-manip.c (rename_variables_in_bb): Rename PHI nodes
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 6b70bf1352b..3e1f53762c0 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,8 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * gcc-interface/decl.c (annotate_value): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-10-09 Hristian Kirtchev <kirtchev@adacore.com>
* sem_unit.adb (Find_Enclosing_Scope): Do not treat a block statement
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index e6cd8d6ba50..e0d7a5f5568 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -8070,7 +8070,7 @@ annotate_value (tree gnu_size)
can appear for discriminants in expressions for variants. */
if (tree_int_cst_sgn (gnu_size) < 0)
{
- tree t = wide_int_to_tree (sizetype, wi::neg (gnu_size));
+ tree t = wide_int_to_tree (sizetype, -wi::to_wide (gnu_size));
tcode = Negate_Expr;
ops[0] = UI_From_gnu (t);
}
@@ -8174,7 +8174,8 @@ annotate_value (tree gnu_size)
if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (gnu_size, 1);
- wide_int signed_op1 = wi::sext (op1, TYPE_PRECISION (sizetype));
+ wide_int signed_op1 = wi::sext (wi::to_wide (op1),
+ TYPE_PRECISION (sizetype));
if (wi::neg_p (signed_op1))
{
op1 = wide_int_to_tree (sizetype, wi::neg (signed_op1));
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 49d055aeb4d..b63673035f4 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,13 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * c-ada-spec.c (dump_generic_ada_node): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * c-common.c (pointer_int_sum): Likewise.
+ * c-pretty-print.c (pp_c_integer_constant): Likewise.
+ * c-warn.c (match_case_to_enum_1): Likewise.
+ (c_do_switch_warnings): Likewise.
+ (maybe_warn_shift_overflow): Likewise.
+
2017-10-10 Jakub Jelinek <jakub@redhat.com>
PR c/82437
diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c
index 834360f347e..95aacd1697a 100644
--- a/gcc/c-family/c-ada-spec.c
+++ b/gcc/c-family/c-ada-spec.c
@@ -2362,7 +2362,7 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type, int spc,
pp_unsigned_wide_integer (buffer, tree_to_uhwi (node));
else
{
- wide_int val = node;
+ wide_int val = wi::to_wide (node);
int i;
if (wi::neg_p (val))
{
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index b3ec3a0f7e6..09594e44e6c 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -3158,7 +3158,7 @@ pointer_int_sum (location_t loc, enum tree_code resultcode,
convert (TREE_TYPE (intop), size_exp));
intop = convert (sizetype, t);
if (TREE_OVERFLOW_P (intop) && !TREE_OVERFLOW (t))
- intop = wide_int_to_tree (TREE_TYPE (intop), intop);
+ intop = wide_int_to_tree (TREE_TYPE (intop), wi::to_wide (intop));
}
/* Create the sum or difference. */
diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c
index 745f0fd6011..0f48b9e958a 100644
--- a/gcc/c-family/c-pretty-print.c
+++ b/gcc/c-family/c-pretty-print.c
@@ -916,9 +916,9 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i)
pp_unsigned_wide_integer (pp, tree_to_uhwi (i));
else
{
- wide_int wi = i;
+ wide_int wi = wi::to_wide (i);
- if (wi::lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i))))
+ if (wi::lt_p (wi::to_wide (i), 0, TYPE_SIGN (TREE_TYPE (i))))
{
pp_minus (pp);
wi = -wi;
diff --git a/gcc/c-family/c-warn.c b/gcc/c-family/c-warn.c
index 717fe7f54ee..cb1db0327c3 100644
--- a/gcc/c-family/c-warn.c
+++ b/gcc/c-family/c-warn.c
@@ -1240,11 +1240,11 @@ match_case_to_enum_1 (tree key, tree type, tree label)
char buf[WIDE_INT_PRINT_BUFFER_SIZE];
if (tree_fits_uhwi_p (key))
- print_dec (key, buf, UNSIGNED);
+ print_dec (wi::to_wide (key), buf, UNSIGNED);
else if (tree_fits_shwi_p (key))
- print_dec (key, buf, SIGNED);
+ print_dec (wi::to_wide (key), buf, SIGNED);
else
- print_hex (key, buf);
+ print_hex (wi::to_wide (key), buf);
if (TYPE_NAME (type) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)),
@@ -1346,8 +1346,8 @@ c_do_switch_warnings (splay_tree cases, location_t switch_location,
/* If there's a case value > 1 or < 0, that is outside bool
range, warn. */
if (outside_range_p
- || (max && wi::gts_p (max, 1))
- || (min && wi::lts_p (min, 0))
+ || (max && wi::gts_p (wi::to_wide (max), 1))
+ || (min && wi::lts_p (wi::to_wide (min), 0))
/* And handle the
switch (boolean)
{
@@ -1357,8 +1357,8 @@ c_do_switch_warnings (splay_tree cases, location_t switch_location,
}
case, where we want to warn. */
|| (default_node
- && max && wi::eq_p (max, 1)
- && min && wi::eq_p (min, 0)))
+ && max && wi::to_wide (max) == 1
+ && min && wi::to_wide (min) == 0))
warning_at (switch_location, OPT_Wswitch_bool,
"switch condition has boolean value");
}
@@ -2263,7 +2263,7 @@ maybe_warn_shift_overflow (location_t loc, tree op0, tree op1)
if (TYPE_UNSIGNED (type0))
return false;
- unsigned int min_prec = (wi::min_precision (op0, SIGNED)
+ unsigned int min_prec = (wi::min_precision (wi::to_wide (op0), SIGNED)
+ TREE_INT_CST_LOW (op1));
/* Handle the case of left-shifting 1 into the sign bit.
* However, shifting 1 _out_ of the sign bit, as in
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index ae9d63991f0..822d0227ee8 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,10 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * c-parser.c (c_parser_cilk_clause_vectorlength): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * c-typeck.c (build_c_cast, c_finish_omp_clauses): Likewise.
+ (c_tree_equal): Likewise.
+
2017-10-04 David Malcolm <dmalcolm@redhat.com>
* c-decl.c (push_parm_decl): Store c_parm's location into the
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 1a5e39edf45..a622e2a89c9 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -17832,7 +17832,7 @@ c_parser_cilk_clause_vectorlength (c_parser *parser, tree clauses,
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr)))
error_at (loc, "vectorlength must be an integer constant");
- else if (wi::exact_log2 (expr) == -1)
+ else if (wi::exact_log2 (wi::to_wide (expr)) == -1)
error_at (loc, "vectorlength must be a power of 2");
else
{
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 2a10813190e..cb9c589e061 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -5684,7 +5684,7 @@ build_c_cast (location_t loc, tree type, tree expr)
}
else if (TREE_OVERFLOW (value))
/* Reset VALUE's overflow flags, ensuring constant sharing. */
- value = wide_int_to_tree (TREE_TYPE (value), value);
+ value = wide_int_to_tree (TREE_TYPE (value), wi::to_wide (value));
}
}
@@ -13504,7 +13504,7 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
- bool neg = wi::neg_p ((wide_int) offset);
+ bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (c),
neg ? MINUS_EXPR : PLUS_EXPR,
@@ -14237,7 +14237,7 @@ c_tree_equal (tree t1, tree t2)
switch (code1)
{
case INTEGER_CST:
- return wi::eq_p (t1, t2);
+ return wi::to_wide (t1) == wi::to_wide (t2);
case REAL_CST:
return real_equal (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2));
diff --git a/gcc/calls.c b/gcc/calls.c
index 72cf9e016c8..4d54fc6de05 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -1293,8 +1293,6 @@ get_size_range (tree exp, tree range[2])
tree exptype = TREE_TYPE (exp);
unsigned expprec = TYPE_PRECISION (exptype);
- wide_int wzero = wi::zero (expprec);
- wide_int wmaxval = wide_int (TYPE_MAX_VALUE (exptype));
bool signed_p = !TYPE_UNSIGNED (exptype);
@@ -1302,7 +1300,7 @@ get_size_range (tree exp, tree range[2])
{
if (signed_p)
{
- if (wi::les_p (max, wzero))
+ if (wi::les_p (max, 0))
{
/* EXP is not in a strictly negative range. That means
it must be in some (not necessarily strictly) positive
@@ -1310,24 +1308,24 @@ get_size_range (tree exp, tree range[2])
conversions negative values end up converted to large
positive values, and otherwise they are not valid sizes,
the resulting range is in both cases [0, TYPE_MAX]. */
- min = wzero;
- max = wmaxval;
+ min = wi::zero (expprec);
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
- else if (wi::les_p (min - 1, wzero))
+ else if (wi::les_p (min - 1, 0))
{
/* EXP is not in a negative-positive range. That means EXP
is either negative, or greater than max. Since negative
sizes are invalid make the range [MAX + 1, TYPE_MAX]. */
min = max + 1;
- max = wmaxval;
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
else
{
max = min - 1;
- min = wzero;
+ min = wi::zero (expprec);
}
}
- else if (wi::eq_p (wzero, min - 1))
+ else if (wi::eq_p (0, min - 1))
{
/* EXP is unsigned and not in the range [1, MAX]. That means
it's either zero or greater than MAX. Even though 0 would
@@ -1335,12 +1333,12 @@ get_size_range (tree exp, tree range[2])
[MAX, TYPE_MAX] so that when MAX is greater than the limit
the whole range is diagnosed. */
min = max + 1;
- max = wmaxval;
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
else
{
max = min - 1;
- min = wzero;
+ min = wi::zero (expprec);
}
}
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 3d0cefbd46b..d8da3dd76cd 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -626,7 +626,7 @@ cgraph_node::create_thunk (tree alias, tree, bool this_adjusting,
/* Make sure that if VIRTUAL_OFFSET is in sync with VIRTUAL_VALUE. */
gcc_checking_assert (virtual_offset
- ? wi::eq_p (virtual_offset, virtual_value)
+ ? virtual_value == wi::to_wide (virtual_offset)
: virtual_value == 0);
node->thunk.fixed_offset = fixed_offset;
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index ee98a1f8228..6b3a7566bc1 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -11039,7 +11039,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -11069,7 +11070,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -11101,7 +11103,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 622218c60ef..c93ad95b1a6 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -5883,7 +5883,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -5913,7 +5914,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -5945,7 +5947,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index 62ddc579d86..d9c8277eff5 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -14495,7 +14495,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
break;
}
- tmap = wide_int_to_tree (map_type, arg[0]);
+ tmap = wide_int_to_tree (map_type, wi::to_wide (arg[0]));
map = TREE_INT_CST_LOW (tmap);
if (TREE_CODE (tval) != INTEGER_CST
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index ed9ea03682f..c95f82dc3ae 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -3318,7 +3318,7 @@ bfin_local_alignment (tree type, unsigned align)
memcpy can use 32 bit loads/stores. */
if (TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::gtu_p (TYPE_SIZE (type), 8)
+ && wi::gtu_p (wi::to_wide (TYPE_SIZE (type)), 8)
&& align < 32)
return 32;
return align;
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index b6dad70df0a..e633b88f6b0 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -1319,13 +1319,13 @@ darwin_mergeable_constant_section (tree exp,
if (TREE_CODE (size) == INTEGER_CST)
{
- if (wi::eq_p (size, 4))
+ if (wi::to_wide (size) == 4)
return darwin_sections[literal4_section];
- else if (wi::eq_p (size, 8))
+ else if (wi::to_wide (size) == 8)
return darwin_sections[literal8_section];
else if (HAVE_GAS_LITERAL16
&& TARGET_64BIT
- && wi::eq_p (size, 16))
+ && wi::to_wide (size) == 16)
return darwin_sections[literal16_section];
}
}
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 1ee8351c21f..d6f913ac411 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -31670,12 +31670,12 @@ ix86_data_alignment (tree type, int align, bool opt)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
{
- if (wi::geu_p (TYPE_SIZE (type), max_align_compat)
+ if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align_compat)
&& align < max_align_compat)
align = max_align_compat;
- if (wi::geu_p (TYPE_SIZE (type), max_align)
- && align < max_align)
- align = max_align;
+ if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align)
+ && align < max_align)
+ align = max_align;
}
/* x86-64 ABI requires arrays greater than 16 bytes to be aligned
@@ -31685,7 +31685,7 @@ ix86_data_alignment (tree type, int align, bool opt)
if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::geu_p (TYPE_SIZE (type), 128)
+ && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
&& align < 128)
return 128;
}
@@ -31804,7 +31804,7 @@ ix86_local_alignment (tree exp, machine_mode mode,
!= TYPE_MAIN_VARIANT (va_list_type_node)))
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::geu_p (TYPE_SIZE (type), 128)
+ && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
&& align < 128)
return 128;
}
diff --git a/gcc/config/msp430/msp430.c b/gcc/config/msp430/msp430.c
index 80ea1190fba..4f32fc855e5 100644
--- a/gcc/config/msp430/msp430.c
+++ b/gcc/config/msp430/msp430.c
@@ -1877,7 +1877,7 @@ msp430_attr (tree * node,
break;
case INTEGER_CST:
- if (wi::gtu_p (value, 63))
+ if (wi::gtu_p (wi::to_wide (value), 63))
/* Allow the attribute to be added - the linker script
being used may still recognise this value. */
warning (OPT_Wattributes,
diff --git a/gcc/config/nds32/nds32.c b/gcc/config/nds32/nds32.c
index 65095ffaff1..c1eb66abc17 100644
--- a/gcc/config/nds32/nds32.c
+++ b/gcc/config/nds32/nds32.c
@@ -2576,8 +2576,8 @@ nds32_insert_attributes (tree decl, tree *attributes)
id = TREE_VALUE (id_list);
/* Issue error if it is not a valid integer value. */
if (TREE_CODE (id) != INTEGER_CST
- || wi::ltu_p (id, lower_bound)
- || wi::gtu_p (id, upper_bound))
+ || wi::ltu_p (wi::to_wide (id), lower_bound)
+ || wi::gtu_p (wi::to_wide (id), upper_bound))
error ("invalid id value for interrupt/exception attribute");
/* Advance to next id. */
@@ -2604,8 +2604,8 @@ nds32_insert_attributes (tree decl, tree *attributes)
/* 3. Check valid integer value for reset. */
if (TREE_CODE (id) != INTEGER_CST
- || wi::ltu_p (id, lower_bound)
- || wi::gtu_p (id, upper_bound))
+ || wi::ltu_p (wi::to_wide (id), lower_bound)
+ || wi::gtu_p (wi::to_wide (id), upper_bound))
error ("invalid id value for reset attribute");
/* 4. Check valid function for nmi/warm. */
diff --git a/gcc/config/powerpcspe/powerpcspe-c.c b/gcc/config/powerpcspe/powerpcspe-c.c
index db041531209..661480fd479 100644
--- a/gcc/config/powerpcspe/powerpcspe-c.c
+++ b/gcc/config/powerpcspe/powerpcspe-c.c
@@ -6055,7 +6055,8 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
/* If the second argument is an integer constant, if the value is in
the expected range, generate the built-in code if we can. We need
64-bit and direct move to extract the small integer vectors. */
- if (TREE_CODE (arg2) == INTEGER_CST && wi::ltu_p (arg2, nunits))
+ if (TREE_CODE (arg2) == INTEGER_CST
+ && wi::ltu_p (wi::to_wide (arg2), nunits))
{
switch (mode)
{
@@ -6217,7 +6218,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::ltu_p (arg2, 2))
+ && wi::ltu_p (wi::to_wide (arg2), 2))
{
tree call = NULL_TREE;
@@ -6233,7 +6234,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
}
else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::eq_p (arg2, 0))
+ && wi::eq_p (wi::to_wide (arg2), 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
diff --git a/gcc/config/powerpcspe/powerpcspe.c b/gcc/config/powerpcspe/powerpcspe.c
index 12af88417ba..528f62da71d 100644
--- a/gcc/config/powerpcspe/powerpcspe.c
+++ b/gcc/config/powerpcspe/powerpcspe.c
@@ -11617,7 +11617,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -11647,7 +11648,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -11679,7 +11681,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -15936,14 +15939,15 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
/* Check whether the 2nd and 3rd arguments are integer constants and in
range and prepare arguments. */
STRIP_NOPS (arg1);
- if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
+ if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
{
error ("argument 2 must be 0 or 1");
return CONST0_RTX (tmode);
}
STRIP_NOPS (arg2);
- if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || wi::geu_p (wi::to_wide (arg2), 16))
{
error ("argument 3 must be in the range 0..15");
return CONST0_RTX (tmode);
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index 2a916b43873..8e581249b74 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -6253,7 +6253,8 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
/* If the second argument is an integer constant, if the value is in
the expected range, generate the built-in code if we can. We need
64-bit and direct move to extract the small integer vectors. */
- if (TREE_CODE (arg2) == INTEGER_CST && wi::ltu_p (arg2, nunits))
+ if (TREE_CODE (arg2) == INTEGER_CST
+ && wi::ltu_p (wi::to_wide (arg2), nunits))
{
switch (mode)
{
@@ -6415,7 +6416,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::ltu_p (arg2, 2))
+ && wi::ltu_p (wi::to_wide (arg2), 2))
{
tree call = NULL_TREE;
@@ -6431,7 +6432,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
}
else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::eq_p (arg2, 0))
+ && wi::eq_p (wi::to_wide (arg2), 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index f98ef93e6d4..12ddd970be5 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -10958,7 +10958,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -10988,7 +10989,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -11020,7 +11022,8 @@ rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
@@ -15091,14 +15094,15 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
/* Check whether the 2nd and 3rd arguments are integer constants and in
range and prepare arguments. */
STRIP_NOPS (arg1);
- if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
+ if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
{
error ("argument 2 must be 0 or 1");
return CONST0_RTX (tmode);
}
STRIP_NOPS (arg2);
- if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || wi::geu_p (wi::to_wide (arg2), 16))
{
error ("argument 3 must be in the range 0..15");
return CONST0_RTX (tmode);
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 52a82df0044..3ef3c197fed 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -1102,11 +1102,11 @@ s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
err = 1;
else if (TREE_CODE (expr) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr))
- || wi::gtu_p (expr, s390_hotpatch_hw_max))
+ || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
err = 1;
else if (TREE_CODE (expr2) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
- || wi::gtu_p (expr2, s390_hotpatch_hw_max))
+ || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
err = 1;
else
err = 0;
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 082327ed48d..0d69bda285d 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,11 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * cvt.c (ignore_overflows): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * decl.c (check_array_designated_initializer): Likewise.
+ * mangle.c (write_integer_cst): Likewise.
+ * semantics.c (cp_finish_omp_clause_depend_sink): Likewise.
+
2017-10-10 Nathan Sidwell <nathan@acm.org>
* name-lookup.c (set_global_binding): Don't deal with STAT_HACK.
diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c
index a3bd4a137d8..d82293b421d 100644
--- a/gcc/cp/cvt.c
+++ b/gcc/cp/cvt.c
@@ -582,7 +582,7 @@ ignore_overflows (tree expr, tree orig)
{
gcc_assert (!TREE_OVERFLOW (orig));
/* Ensure constant sharing. */
- expr = wide_int_to_tree (TREE_TYPE (expr), expr);
+ expr = wide_int_to_tree (TREE_TYPE (expr), wi::to_wide (expr));
}
return expr;
}
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 0e70bb5d59d..5a0305597b4 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -5298,7 +5298,7 @@ check_array_designated_initializer (constructor_elt *ce,
== INTEGER_CST))
{
/* A C99 designator is OK if it matches the current index. */
- if (wi::eq_p (ce_index, index))
+ if (wi::to_wide (ce_index) == index)
return true;
else
sorry ("non-trivial designated initializers not supported");
diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c
index 6046906e77d..64397cdddcb 100644
--- a/gcc/cp/mangle.c
+++ b/gcc/cp/mangle.c
@@ -1725,7 +1725,7 @@ write_integer_cst (const tree cst)
type = c_common_signed_or_unsigned_type (1, TREE_TYPE (cst));
base = build_int_cstu (type, chunk);
- n = wide_int_to_tree (type, cst);
+ n = wide_int_to_tree (type, wi::to_wide (cst));
if (sign < 0)
{
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index d96423f2348..77c71e71bcf 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -5761,7 +5761,7 @@ cp_finish_omp_clause_depend_sink (tree sink_clause)
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
- bool neg = wi::neg_p ((wide_int) offset);
+ bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
decl = mark_rvalue_use (decl);
decl = convert_from_reference (decl);
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index ea7c97ccb31..0615e84fc83 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -714,7 +714,7 @@ stabstr_O (tree cst)
/* If the value is zero, the base indicator will serve as the value
all by itself. */
- if (wi::eq_p (cst, 0))
+ if (wi::to_wide (cst) == 0)
return;
/* GDB wants constants with no extra leading "1" bits, so
@@ -722,19 +722,19 @@ stabstr_O (tree cst)
present. */
if (res_pres == 1)
{
- digit = wi::extract_uhwi (cst, prec - 1, 1);
+ digit = wi::extract_uhwi (wi::to_wide (cst), prec - 1, 1);
stabstr_C ('0' + digit);
}
else if (res_pres == 2)
{
- digit = wi::extract_uhwi (cst, prec - 2, 2);
+ digit = wi::extract_uhwi (wi::to_wide (cst), prec - 2, 2);
stabstr_C ('0' + digit);
}
prec -= res_pres;
for (i = prec - 3; i >= 0; i = i - 3)
{
- digit = wi::extract_uhwi (cst, i, 3);
+ digit = wi::extract_uhwi (wi::to_wide (cst), i, 3);
stabstr_C ('0' + digit);
}
}
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index e97ceb61b46..528c1852e5d 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -19820,7 +19820,7 @@ add_scalar_info (dw_die_ref die, enum dwarf_attribute attr, tree value,
the precision of its type. The precision and signedness
of the type will be necessary to re-interpret it
unambiguously. */
- add_AT_wide (die, attr, value);
+ add_AT_wide (die, attr, wi::to_wide (value));
return;
}
@@ -21236,7 +21236,7 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die)
/* Enumeration constants may be wider than HOST_WIDE_INT. Handle
that here. TODO: This should be re-worked to use correct
signed/unsigned double tags for all cases. */
- add_AT_wide (enum_die, DW_AT_const_value, value);
+ add_AT_wide (enum_die, DW_AT_const_value, wi::to_wide (value));
}
add_gnat_descriptive_type_attribute (type_die, type, context_die);
diff --git a/gcc/expr.c b/gcc/expr.c
index baaef260320..d2e4d042f1b 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -11788,7 +11788,7 @@ const_vector_from_tree (tree exp)
RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
inner);
else
- RTVEC_ELT (v, i) = immed_wide_int_const (elt, inner);
+ RTVEC_ELT (v, i) = immed_wide_int_const (wi::to_wide (elt), inner);
}
return gen_rtx_CONST_VECTOR (mode, v);
diff --git a/gcc/fold-const-call.c b/gcc/fold-const-call.c
index 71f0b524680..98ac0911743 100644
--- a/gcc/fold-const-call.c
+++ b/gcc/fold-const-call.c
@@ -60,7 +60,8 @@ host_size_t_cst_p (tree t, size_t *size_out)
{
if (types_compatible_p (size_type_node, TREE_TYPE (t))
&& integer_cst_p (t)
- && wi::min_precision (t, UNSIGNED) <= sizeof (size_t) * CHAR_BIT)
+ && (wi::min_precision (wi::to_wide (t), UNSIGNED)
+ <= sizeof (size_t) * CHAR_BIT))
{
*size_out = tree_to_uhwi (t);
return true;
@@ -1041,8 +1042,8 @@ fold_const_call_1 (combined_fn fn, tree type, tree arg)
if (SCALAR_INT_MODE_P (mode))
{
wide_int result;
- if (fold_const_call_ss (&result, fn, arg, TYPE_PRECISION (type),
- TREE_TYPE (arg)))
+ if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
+ TYPE_PRECISION (type), TREE_TYPE (arg)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
@@ -1322,7 +1323,8 @@ fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
/* real, int -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
- arg1, REAL_MODE_FORMAT (mode)))
+ wi::to_wide (arg1),
+ REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
@@ -1336,7 +1338,7 @@ fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
/* int, real -> real. */
REAL_VALUE_TYPE result;
- if (fold_const_call_sss (&result, fn, arg0,
+ if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index aac62f83321..f2e1cea2038 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -360,7 +360,7 @@ may_negate_without_overflow_p (const_tree t)
if (TYPE_UNSIGNED (type))
return false;
- return !wi::only_sign_bit_p (t);
+ return !wi::only_sign_bit_p (wi::to_wide (t));
}
/* Determine whether an expression T can be cheaply negated using
@@ -452,9 +452,11 @@ negate_expr_p (tree t)
if (INTEGRAL_TYPE_P (TREE_TYPE (t))
&& ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (t))
&& ! ((TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
- && wi::popcount (wi::abs (TREE_OPERAND (t, 0))) != 1)
+ && (wi::popcount
+ (wi::abs (wi::to_wide (TREE_OPERAND (t, 0))))) != 1)
|| (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && wi::popcount (wi::abs (TREE_OPERAND (t, 1))) != 1)))
+ && (wi::popcount
+ (wi::abs (wi::to_wide (TREE_OPERAND (t, 1))))) != 1)))
break;
/* Fall through. */
@@ -503,7 +505,7 @@ negate_expr_p (tree t)
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::to_wide (op1) == TYPE_PRECISION (type) - 1)
return true;
}
break;
@@ -695,7 +697,7 @@ fold_negate_expr_1 (location_t loc, tree t)
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::to_wide (op1) == TYPE_PRECISION (type) - 1)
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
@@ -959,20 +961,21 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2
}
-/* Combine two integer constants ARG1 and ARG2 under operation CODE
+/* Combine two integer constants PARG1 and PARG2 under operation CODE
to produce a new constant. Return NULL_TREE if we don't know how
to evaluate CODE at compile-time. */
static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
+int_const_binop_1 (enum tree_code code, const_tree parg1, const_tree parg2,
int overflowable)
{
wide_int res;
tree t;
- tree type = TREE_TYPE (arg1);
+ tree type = TREE_TYPE (parg1);
signop sign = TYPE_SIGN (type);
bool overflow = false;
+ wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
switch (code)
@@ -1106,7 +1109,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
t = force_fit_type (type, res, overflowable,
(((sign == SIGNED || overflowable == -1)
&& overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2)));
+ | TREE_OVERFLOW (parg1) | TREE_OVERFLOW (parg2)));
return t;
}
@@ -1258,7 +1261,7 @@ const_binop (enum tree_code code, tree arg1, tree arg2)
{
if (TREE_CODE (arg2) != INTEGER_CST)
return NULL_TREE;
- wide_int w2 = arg2;
+ wi::tree_to_wide_ref w2 = wi::to_wide (arg2);
f2.data.high = w2.elt (1);
f2.data.low = w2.ulow ();
f2.mode = SImode;
@@ -1909,7 +1912,7 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
if (real_less (&r, &l))
{
overflow = true;
- val = lt;
+ val = wi::to_wide (lt);
}
}
@@ -1922,7 +1925,7 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
if (real_less (&u, &r))
{
overflow = true;
- val = ut;
+ val = wi::to_wide (ut);
}
}
}
@@ -4037,7 +4040,7 @@ optimize_bit_field_compare (location_t loc, enum tree_code code,
if (lunsignedp)
{
- if (wi::lrshift (rhs, lbitsize) != 0)
+ if (wi::lrshift (wi::to_wide (rhs), lbitsize) != 0)
{
warning (0, "comparison is always %d due to width of bit-field",
code == NE_EXPR);
@@ -4046,7 +4049,7 @@ optimize_bit_field_compare (location_t loc, enum tree_code code,
}
else
{
- wide_int tem = wi::arshift (rhs, lbitsize - 1);
+ wide_int tem = wi::arshift (wi::to_wide (rhs), lbitsize - 1);
if (tem != 0 && tem != -1)
{
warning (0, "comparison is always %d due to width of bit-field",
@@ -4196,7 +4199,7 @@ all_ones_mask_p (const_tree mask, unsigned int size)
if (size > precision || TYPE_SIGN (type) == UNSIGNED)
return false;
- return wi::mask (size, false, precision) == mask;
+ return wi::mask (size, false, precision) == wi::to_wide (mask);
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
@@ -4222,7 +4225,7 @@ sign_bit_p (tree exp, const_tree val)
return NULL_TREE;
width = TYPE_PRECISION (t);
- if (wi::only_sign_bit_p (val, width))
+ if (wi::only_sign_bit_p (wi::to_wide (val), width))
return exp;
/* Handle extension from a narrower type. */
@@ -5449,7 +5452,8 @@ unextend (tree c, int p, int unsignedp, tree mask)
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1));
+ temp = build_int_cst (TREE_TYPE (c),
+ wi::extract_uhwi (wi::to_wide (c), p - 1, 1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
@@ -6055,7 +6059,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
/* For a constant, we can always simplify if we are a multiply
or (for divide and modulus) if it is a multiple of our constant. */
if (code == MULT_EXPR
- || wi::multiple_of_p (t, c, TYPE_SIGN (type)))
+ || wi::multiple_of_p (wi::to_wide (t), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
tree tem = const_binop (code, fold_convert (ctype, t),
fold_convert (ctype, c));
@@ -6172,7 +6177,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
&& (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0)))
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)),
+ wi::to_wide (op1))
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -6241,7 +6247,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
/* If it's a multiply or a division/modulus operation of a multiple
of our constant, do the operation and verify it doesn't overflow. */
if (code == MULT_EXPR
- || wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ || wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
op1 = const_binop (code, fold_convert (ctype, op1),
fold_convert (ctype, c));
@@ -6280,7 +6287,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
/* If the multiplication can overflow we cannot optimize this. */
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ && wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
*strict_overflow_p = true;
return omit_one_operand (type, integer_zero_node, op0);
@@ -6342,7 +6350,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
&& code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR
&& code != MULT_EXPR)))
{
- if (wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ if (wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
@@ -6351,7 +6360,8 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
const_binop (TRUNC_DIV_EXPR,
op1, c)));
}
- else if (wi::multiple_of_p (c, op1, TYPE_SIGN (type)))
+ else if (wi::multiple_of_p (wi::to_wide (c), wi::to_wide (op1),
+ TYPE_SIGN (type)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
@@ -6534,7 +6544,7 @@ fold_div_compare (enum tree_code code, tree c1, tree c2, tree *lo,
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, c1, c2); */
- wide_int val = wi::mul (c1, c2, sign, &overflow);
+ wide_int val = wi::mul (wi::to_wide (c1), wi::to_wide (c2), sign, &overflow);
prod = force_fit_type (type, val, -1, overflow);
*neg_overflow = false;
@@ -6544,7 +6554,7 @@ fold_div_compare (enum tree_code code, tree c1, tree c2, tree *lo,
*lo = prod;
/* Likewise *hi = int_const_binop (PLUS_EXPR, prod, tmp). */
- val = wi::add (prod, tmp, sign, &overflow);
+ val = wi::add (wi::to_wide (prod), wi::to_wide (tmp), sign, &overflow);
*hi = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (c1) >= 0)
@@ -6688,7 +6698,7 @@ fold_single_bit_test (location_t loc, enum tree_code code,
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
&& bitnum < TYPE_PRECISION (type)
- && wi::ltu_p (TREE_OPERAND (inner, 1),
+ && wi::ltu_p (wi::to_wide (TREE_OPERAND (inner, 1)),
TYPE_PRECISION (type) - bitnum))
{
bitnum += tree_to_uhwi (TREE_OPERAND (inner, 1));
@@ -6868,7 +6878,7 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type,
arg10 = build_one_cst (type);
/* As we canonicalize A - 2 to A + -2 get rid of that sign for
the purpose of this canonicalization. */
- if (wi::neg_p (arg1, TYPE_SIGN (TREE_TYPE (arg1)))
+ if (wi::neg_p (wi::to_wide (arg1), TYPE_SIGN (TREE_TYPE (arg1)))
&& negate_expr_p (arg1)
&& code == PLUS_EXPR)
{
@@ -6960,7 +6970,8 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type,
/* If the sum evaluated to a constant that is not -INF the multiplication
cannot overflow. */
if (TREE_CODE (tem) == INTEGER_CST
- && ! wi::eq_p (tem, wi::min_value (TYPE_PRECISION (utype), SIGNED)))
+ && (wi::to_wide (tem)
+ != wi::min_value (TYPE_PRECISION (utype), SIGNED)))
return fold_build2_loc (loc, MULT_EXPR, type,
fold_convert (type, tem), same);
@@ -8215,7 +8226,7 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset))
return true;
else
- wi_offset = offset;
+ wi_offset = wi::to_wide (offset);
bool overflow;
wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
@@ -9017,7 +9028,7 @@ expr_not_equal_to (tree t, const wide_int &w)
switch (TREE_CODE (t))
{
case INTEGER_CST:
- return wi::ne_p (t, w);
+ return wi::to_wide (t) != w;
case SSA_NAME:
if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
@@ -9876,8 +9887,8 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
int width = TYPE_PRECISION (type), w;
- wide_int c1 = TREE_OPERAND (arg0, 1);
- wide_int c2 = arg1;
+ wide_int c1 = wi::to_wide (TREE_OPERAND (arg0, 1));
+ wide_int c2 = wi::to_wide (arg1);
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
if ((c1 & c2) == c1)
@@ -9979,7 +9990,7 @@ fold_binary_loc (location_t loc,
multiple of 1 << CST. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- wide_int cst1 = arg1;
+ wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
wide_int ncst1 = -cst1;
if ((cst1 & ncst1) == ncst1
&& multiple_of_p (type, arg0,
@@ -9993,8 +10004,9 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (arg0) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- wide_int warg1 = arg1;
- wide_int masked = mask_with_tz (type, warg1, TREE_OPERAND (arg0, 1));
+ wi::tree_to_wide_ref warg1 = wi::to_wide (arg1);
+ wide_int masked
+ = mask_with_tz (type, warg1, wi::to_wide (TREE_OPERAND (arg0, 1)));
if (masked == 0)
return omit_two_operands_loc (loc, type, build_zero_cst (type),
@@ -10021,7 +10033,7 @@ fold_binary_loc (location_t loc,
If B is constant and (B & M) == 0, fold into A & M. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- wide_int cst1 = arg1;
+ wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0
&& INTEGRAL_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == PLUS_EXPR
@@ -10057,8 +10069,7 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
!= INTEGER_CST)
break;
- cst0 = TREE_OPERAND (pmop[which], 1);
- cst0 &= cst1;
+ cst0 = wi::to_wide (TREE_OPERAND (pmop[which], 1)) & cst1;
if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
{
if (cst0 != cst1)
@@ -10076,7 +10087,7 @@ fold_binary_loc (location_t loc,
omitted (assumed 0). */
if ((TREE_CODE (arg0) == PLUS_EXPR
|| (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
- && (cst1 & pmop[which]) == 0)
+ && (cst1 & wi::to_wide (pmop[which])) == 0)
pmop[which] = NULL;
break;
default:
@@ -10134,7 +10145,7 @@ fold_binary_loc (location_t loc,
{
prec = element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
+ wide_int mask = wide_int::from (wi::to_wide (arg1), prec, UNSIGNED);
if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
@@ -10177,7 +10188,7 @@ fold_binary_loc (location_t loc,
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
tree pow2 = build_int_cst (TREE_TYPE (sh_cnt),
- wi::exact_log2 (sval));
+ wi::exact_log2 (wi::to_wide (sval)));
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
@@ -10308,7 +10319,8 @@ fold_binary_loc (location_t loc,
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)),
+ && wi::umod_trunc (wi::to_wide (arg1)
+ + wi::to_wide (TREE_OPERAND (arg0, 1)),
prec) == 0)
return fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
@@ -10595,7 +10607,7 @@ fold_binary_loc (location_t loc,
prec = TYPE_PRECISION (itype);
/* Check for a valid shift count. */
- if (wi::ltu_p (arg001, prec))
+ if (wi::ltu_p (wi::to_wide (arg001), prec))
{
tree arg01 = TREE_OPERAND (arg0, 1);
tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
@@ -10671,7 +10683,7 @@ fold_binary_loc (location_t loc,
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
tree itype = TREE_TYPE (arg00);
- if (wi::eq_p (arg01, element_precision (itype) - 1))
+ if (wi::to_wide (arg01) == element_precision (itype) - 1)
{
if (TYPE_UNSIGNED (itype))
{
@@ -11421,7 +11433,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
(inner_width, outer_width - inner_width, false,
TYPE_PRECISION (TREE_TYPE (arg1)));
- wide_int common = mask & arg1;
+ wide_int common = mask & wi::to_wide (arg1);
if (common == mask)
{
tem_type = signed_type_for (TREE_TYPE (tem));
@@ -11644,7 +11656,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
/* Make sure that the perm value is in an acceptable
range. */
- wide_int t = val;
+ wi::tree_to_wide_ref t = wi::to_wide (val);
need_mask_canon |= wi::gtu_p (t, mask);
need_mask_canon2 |= wi::gtu_p (t, mask2);
unsigned int elt = t.to_uhwi () & mask;
@@ -11726,9 +11738,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
{
unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (op2);
unsigned bitsize = TYPE_PRECISION (TREE_TYPE (arg1));
- wide_int tem = wi::bit_and (arg0,
- wi::shifted_mask (bitpos, bitsize, true,
- TYPE_PRECISION (type)));
+ wide_int tem = (wi::to_wide (arg0)
+ & wi::shifted_mask (bitpos, bitsize, true,
+ TYPE_PRECISION (type)));
wide_int tem2
= wi::lshift (wi::zext (wi::to_wide (arg1, TYPE_PRECISION (type)),
bitsize), bitpos);
@@ -12548,7 +12560,8 @@ multiple_of_p (tree type, const_tree top, const_tree bottom)
op1 = TREE_OPERAND (top, 1);
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)),
+ wi::to_wide (op1))
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -13682,7 +13695,7 @@ fold_negate_const (tree arg0, tree type)
case INTEGER_CST:
{
bool overflow;
- wide_int val = wi::neg (arg0, &overflow);
+ wide_int val = wi::neg (wi::to_wide (arg0), &overflow);
t = force_fit_type (type, val, 1,
(overflow && ! TYPE_UNSIGNED (type))
|| TREE_OVERFLOW (arg0));
@@ -13729,7 +13742,7 @@ fold_abs_const (tree arg0, tree type)
{
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (!wi::neg_p (arg0, TYPE_SIGN (type)))
+ if (!wi::neg_p (wi::to_wide (arg0), TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
@@ -13737,7 +13750,7 @@ fold_abs_const (tree arg0, tree type)
else
{
bool overflow;
- wide_int val = wi::neg (arg0, &overflow);
+ wide_int val = wi::neg (wi::to_wide (arg0), &overflow);
t = force_fit_type (type, val, -1,
overflow | TREE_OVERFLOW (arg0));
}
@@ -13766,7 +13779,7 @@ fold_not_const (const_tree arg0, tree type)
{
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- return force_fit_type (type, wi::bit_not (arg0), 0, TREE_OVERFLOW (arg0));
+ return force_fit_type (type, ~wi::to_wide (arg0), 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
@@ -14221,7 +14234,7 @@ round_up_loc (location_t loc, tree value, unsigned int divisor)
{
if (TREE_CODE (value) == INTEGER_CST)
{
- wide_int val = value;
+ wide_int val = wi::to_wide (value);
bool overflow_p;
if ((val & (divisor - 1)) == 0)
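
The fold-const.c hunks above all apply the same mechanical rewrite: an
INTEGER_CST tree that used to be passed straight to a wi:: routine is now
viewed through wi::to_wide first, which keeps the constant's native
precision. A minimal sketch of the idiom, assuming GCC's internal tree.h
and wide-int.h (add_one_cst is an illustrative name, not part of the
patch):

  /* Sketch: add 1 to an INTEGER_CST in its native precision.  */
  static tree
  add_one_cst (tree t)
  {
    gcc_assert (TREE_CODE (t) == INTEGER_CST);
    /* Formerly wi::add (t, 1); the accessor is now explicit.  */
    wide_int val = wi::to_wide (t) + 1;
    return wide_int_to_tree (TREE_TYPE (t), val);
  }
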
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index aaebe17fb51..926bee73529 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,13 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * target-memory.c (gfc_interpret_logical): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * trans-const.c (gfc_conv_tree_to_mpz): Likewise.
+ * trans-expr.c (gfc_conv_cst_int_power): Likewise.
+ * trans-intrinsic.c (trans_this_image): Likewise.
+ (gfc_conv_intrinsic_bound): Likewise.
+ (conv_intrinsic_cobound): Likewise.
+
2017-10-08 Steven G. Kargl <kargl@gcc.gnu.org>
* check.c (gfc_check_x): Remove function.
diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c
index ceca3accd93..b2fe8eee01c 100644
--- a/gcc/fortran/target-memory.c
+++ b/gcc/fortran/target-memory.c
@@ -429,7 +429,7 @@ gfc_interpret_logical (int kind, unsigned char *buffer, size_t buffer_size,
{
tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer,
buffer_size);
- *logical = wi::eq_p (t, 0) ? 0 : 1;
+ *logical = wi::to_wide (t) == 0 ? 0 : 1;
return size_logical (kind);
}
diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c
index 128d47d0fa3..62b85f738fc 100644
--- a/gcc/fortran/trans-const.c
+++ b/gcc/fortran/trans-const.c
@@ -211,7 +211,7 @@ gfc_conv_mpz_to_tree (mpz_t i, int kind)
void
gfc_conv_tree_to_mpz (mpz_t i, tree source)
{
- wi::to_mpz (source, i, TYPE_SIGN (TREE_TYPE (source)));
+ wi::to_mpz (wi::to_wide (source), i, TYPE_SIGN (TREE_TYPE (source)));
}
/* Converts a real constant into backend form. */
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index d1b61b5228b..4e8bfc5d6f9 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -2861,7 +2861,7 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
HOST_WIDE_INT m;
unsigned HOST_WIDE_INT n;
int sgn;
- wide_int wrhs = rhs;
+ wi::tree_to_wide_ref wrhs = wi::to_wide (rhs);
/* If exponent is too large, we won't expand it anyway, so don't bother
with large integer values. */
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 9bc465e43d9..532d3ab237d 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -2235,8 +2235,9 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
if (INTEGER_CST_P (dim_arg))
{
- if (wi::ltu_p (dim_arg, 1)
- || wi::gtu_p (dim_arg, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wi::to_wide (dim_arg), 1)
+ || wi::gtu_p (wi::to_wide (dim_arg),
+ GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
@@ -2657,8 +2658,9 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
if (INTEGER_CST_P (bound))
{
if (((!as || as->type != AS_ASSUMED_RANK)
- && wi::geu_p (bound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
- || wi::gtu_p (bound, GFC_MAX_DIMENSIONS))
+ && wi::geu_p (wi::to_wide (bound),
+ GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
+ || wi::gtu_p (wi::to_wide (bound), GFC_MAX_DIMENSIONS))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", upper ? "UBOUND" : "LBOUND",
&expr->where);
@@ -2853,8 +2855,9 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
if (INTEGER_CST_P (bound))
{
- if (wi::ltu_p (bound, 1)
- || wi::gtu_p (bound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wi::to_wide (bound), 1)
+ || wi::gtu_p (wi::to_wide (bound),
+ GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
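
The three Fortran hunks above follow the same pattern, but the constants
are small dimension indices, so the unsigned comparisons (wi::ltu_p,
wi::gtu_p) do double duty: a negative constant wraps to a huge unsigned
value and fails the range test. A hedged sketch of that guard
(dim_in_corange_p is an illustrative name, not GCC API):

  /* Sketch: is DIM a valid 1-based index no greater than CORANK?
     Unsigned compares reject negative DIM as well.  */
  static bool
  dim_in_corange_p (tree dim, int corank)
  {
    return !wi::ltu_p (wi::to_wide (dim), 1)
           && !wi::gtu_p (wi::to_wide (dim), corank);
  }
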
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index b9e08897f6d..cb33c1e09fe 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -6784,7 +6784,7 @@ gimple_fold_indirect_ref (tree t)
|| DECL_P (TREE_OPERAND (addr, 0)))
return fold_build2 (MEM_REF, type,
addr,
- wide_int_to_tree (ptype, off));
+ wide_int_to_tree (ptype, wi::to_wide (off)));
}
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
diff --git a/gcc/gimple-ssa-warn-alloca.c b/gcc/gimple-ssa-warn-alloca.c
index ab4f9d82858..2d255a493d0 100644
--- a/gcc/gimple-ssa-warn-alloca.c
+++ b/gcc/gimple-ssa-warn-alloca.c
@@ -194,7 +194,8 @@ alloca_call_type_by_arg (tree arg, tree arg_casted, edge e, unsigned max_size)
// degrade into "if (N > Y) alloca(N)".
if (cond_code == GT_EXPR || cond_code == GE_EXPR)
rhs = integer_zero_node;
- return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE, rhs);
+ return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE,
+ wi::to_wide (rhs));
}
}
else
@@ -294,7 +295,8 @@ alloca_call_type (gimple *stmt, bool is_vla, tree *invalid_casted_type)
if (TREE_CODE (len) == INTEGER_CST)
{
if (tree_to_uhwi (len) > max_size)
- return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE, len);
+ return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE,
+ wi::to_wide (len));
if (integer_zerop (len))
return alloca_type_and_limit (ALLOCA_ARG_IS_ZERO);
ret = alloca_type_and_limit (ALLOCA_OK);
diff --git a/gcc/gimple.c b/gcc/gimple.c
index c4e6f8176b9..79213b22c24 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -2965,13 +2965,14 @@ preprocess_case_label_vec_for_gimple (vec<tree> labels,
if (CASE_HIGH (labels[i]) != NULL_TREE
&& (CASE_HIGH (widest_label) == NULL_TREE
- || wi::gtu_p (wi::sub (CASE_HIGH (labels[i]),
- CASE_LOW (labels[i])),
- wi::sub (CASE_HIGH (widest_label),
- CASE_LOW (widest_label)))))
+ || (wi::gtu_p
+ (wi::to_wide (CASE_HIGH (labels[i]))
+ - wi::to_wide (CASE_LOW (labels[i])),
+ wi::to_wide (CASE_HIGH (widest_label))
+ - wi::to_wide (CASE_LOW (widest_label))))))
widest_label = labels[i];
- if (wi::add (low, 1) != high)
+ if (wi::to_wide (low) + 1 != wi::to_wide (high))
break;
}
if (i == len)
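
The gimple.c hunk above also shows the second half of the conversion:
once both operands go through wi::to_wide, the named helpers wi::sub and
wi::add can become infix - and +, because both sides now carry the same
native precision. A sketch (consecutive_cases_p is an illustrative name):

  /* Sketch: do two case labels of one type cover adjacent values?  */
  static bool
  consecutive_cases_p (tree low, tree high)
  {
    return wi::to_wide (low) + 1 == wi::to_wide (high);
  }
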
diff --git a/gcc/godump.c b/gcc/godump.c
index 28d81a1e260..9a9d70fd59e 100644
--- a/gcc/godump.c
+++ b/gcc/godump.c
@@ -1159,7 +1159,7 @@ go_output_typedef (struct godump_container *container, tree decl)
snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED,
tree_to_uhwi (TREE_VALUE (element)));
else
- print_hex (element, buf);
+ print_hex (wi::to_wide (element), buf);
mhval->value = xstrdup (buf);
*slot = mhval;
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 6cd5bc7c9d9..0e6824bbd45 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -63,7 +63,7 @@ along with GCC; see the file COPYING3. If not see
static inline void
tree_int_to_gmp (tree t, mpz_t res)
{
- wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
+ wi::to_mpz (wi::to_wide (t), res, TYPE_SIGN (TREE_TYPE (t)));
}
/* Return an isl identifier for the polyhedral basic block PBB. */
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 051f78715c2..d9e243e1547 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -485,7 +485,7 @@ get_min_precision (tree arg, signop sign)
p = wi::min_precision (w, sign);
}
else
- p = wi::min_precision (arg, sign);
+ p = wi::min_precision (wi::to_wide (arg), sign);
return MIN (p, prec);
}
while (CONVERT_EXPR_P (arg)
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 6b3d8d7364c..d23c1d8ba3e 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -4971,8 +4971,8 @@ ipcp_store_vr_results (void)
{
vr.known = true;
vr.type = plats->m_value_range.m_vr.type;
- vr.min = plats->m_value_range.m_vr.min;
- vr.max = plats->m_value_range.m_vr.max;
+ vr.min = wi::to_wide (plats->m_value_range.m_vr.min);
+ vr.max = wi::to_wide (plats->m_value_range.m_vr.max);
}
else
{
diff --git a/gcc/ipa-polymorphic-call.c b/gcc/ipa-polymorphic-call.c
index 9ac5153bf67..1c5aca4abdc 100644
--- a/gcc/ipa-polymorphic-call.c
+++ b/gcc/ipa-polymorphic-call.c
@@ -967,8 +967,9 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
else if (TREE_CODE (base_pointer) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (base_pointer, 1)) == INTEGER_CST)
{
- offset_int o = offset_int::from (TREE_OPERAND (base_pointer, 1),
- SIGNED);
+ offset_int o
+ = offset_int::from (wi::to_wide (TREE_OPERAND (base_pointer, 1)),
+ SIGNED);
o *= BITS_PER_UNIT;
o += offset;
if (!wi::fits_shwi_p (o))
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 8fbb6435427..a687f7cb29e 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -397,9 +397,9 @@ ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs)
fprintf (f, " VR ");
fprintf (f, "%s[",
(jump_func->m_vr->type == VR_ANTI_RANGE) ? "~" : "");
- print_decs (jump_func->m_vr->min, f);
+ print_decs (wi::to_wide (jump_func->m_vr->min), f);
fprintf (f, ", ");
- print_decs (jump_func->m_vr->max, f);
+ print_decs (wi::to_wide (jump_func->m_vr->max), f);
fprintf (f, "]\n");
}
else
@@ -4373,7 +4373,8 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
if (TYPE_ALIGN (type) > align)
align = TYPE_ALIGN (type);
}
- misalign += (offset_int::from (off, SIGNED).to_short_addr ()
+ misalign += (offset_int::from (wi::to_wide (off),
+ SIGNED).to_short_addr ()
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 1911e84d1ca..fef0f9cc96d 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,8 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * lto.c (compare_tree_sccs_1): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 182607b6fa4..cc36851b8b6 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -1039,7 +1039,7 @@ compare_tree_sccs_1 (tree t1, tree t2, tree **map)
if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
{
- if (!wi::eq_p (t1, t2))
+ if (wi::to_wide (t1) != wi::to_wide (t2))
return false;
}
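
In compare_tree_sccs_1 above, wi::eq_p (t1, t2) becomes an infix
comparison of the two wi::to_wide views. That form is only well defined
when both constants share a precision, which the surrounding SCC
comparison is assumed to have established by this point. Sketch
(int_cst_value_equal_p is an illustrative name):

  /* Sketch: value-equality of two INTEGER_CSTs of the same type.  */
  static bool
  int_cst_value_equal_p (tree t1, tree t2)
  {
    gcc_checking_assert (TREE_TYPE (t1) == TREE_TYPE (t2));
    return wi::to_wide (t1) == wi::to_wide (t2);
  }
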
diff --git a/gcc/match.pd b/gcc/match.pd
index e58a65af59b..2f8a7411d76 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -276,7 +276,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(div (div @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
- wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
+ wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type), &overflow_p);
}
(if (!overflow_p)
(div @0 { wide_int_to_tree (type, mul); })
@@ -290,7 +291,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
- wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
+ wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type), &overflow_p);
}
/* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
otherwise undefined overflow implies that @0 must be zero. */
@@ -359,9 +361,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (integer_pow2p (@2)
&& tree_int_cst_sgn (@2) > 0
&& tree_nop_conversion_p (type, TREE_TYPE (@0))
- && wi::add (@2, @1) == 0)
- (rshift (convert @0) { build_int_cst (integer_type_node,
- wi::exact_log2 (@2)); }))))
+ && wi::to_wide (@2) + wi::to_wide (@1) == 0)
+ (rshift (convert @0)
+ { build_int_cst (integer_type_node,
+ wi::exact_log2 (wi::to_wide (@2))); }))))
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
@@ -414,7 +417,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(if (ANY_INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_UNDEFINED (type)
- && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
+ && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type)))
{ build_zero_cst (type); })))
/* X % -C is the same as X % C. */
@@ -422,7 +426,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(trunc_mod @0 INTEGER_CST@1)
(if (TYPE_SIGN (type) == SIGNED
&& !TREE_OVERFLOW (@1)
- && wi::neg_p (@1)
+ && wi::neg_p (wi::to_wide (@1))
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (@1, @1))
@@ -438,7 +442,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Avoid this transformation if X might be INT_MIN or
Y might be -1, because we would then change valid
INT_MIN % -(-1) into invalid INT_MIN % -1. */
- && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
+ && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
|| expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
(TREE_TYPE (@1))))))
(trunc_mod @0 (convert @1))))
@@ -471,7 +475,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(trunc_div (mult @0 integer_pow2p@1) @1)
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
(bit_and @0 { wide_int_to_tree
- (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
+ (type, wi::mask (TYPE_PRECISION (type)
+ - wi::exact_log2 (wi::to_wide (@1)),
false, TYPE_PRECISION (type))); })))
/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
@@ -505,7 +510,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for pows (POWI)
(simplify
(pows (op @0) INTEGER_CST@1)
- (if (wi::bit_and (@1, 1) == 0)
+ (if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
/* Strip negate and abs from both operands of hypot. */
(for hypots (HYPOT)
@@ -546,7 +551,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
copysigns (COPYSIGN)
(simplify
(pows (copysigns @0 @2) INTEGER_CST@1)
- (if (wi::bit_and (@1, 1) == 0)
+ (if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
(for hypots (HYPOT)
@@ -657,7 +662,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(minus (bit_xor @0 @1) @1))
(simplify
(minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
- (if (wi::bit_not (@2) == @1)
+ (if (~wi::to_wide (@2) == wi::to_wide (@1))
(minus (bit_xor @0 @1) @1)))
/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
@@ -672,7 +677,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(bit_xor @0 @1))
(simplify
(op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
- (if (wi::bit_not (@2) == @1)
+ (if (~wi::to_wide (@2) == wi::to_wide (@1))
(bit_xor @0 @1))))
/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
@@ -685,7 +690,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
+ && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
(bit_xor @0 @1)))
#endif
@@ -750,7 +755,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(bit_and SSA_NAME@0 INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
+ && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
@0))
#endif
@@ -851,7 +856,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(convert2? (bit_and@5 @2 INTEGER_CST@3)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@2))
- && wi::bit_and (@1, @3) == 0)
+ && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
(bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~ X*/
@@ -1150,7 +1155,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
(cmp @0 @2)
(if (TREE_CODE (@1) == INTEGER_CST
- && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
+ && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
(cmp @2 @0))))))
/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
@@ -1161,8 +1166,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_UNSIGNED (TREE_TYPE (@0))
&& TYPE_PRECISION (TREE_TYPE (@0)) > 1
- && wi::eq_p (@2, wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)),
- SIGNED) - 1))
+ && (wi::to_wide (@2)
+ == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
(icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
@@ -1170,7 +1175,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for cmp (simple_comparison)
(simplify
(cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
- (if (wi::gt_p(@2, 0, TYPE_SIGN (TREE_TYPE (@2))))
+ (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
(cmp @0 @1))))
/* X / C1 op C2 into a simple range test. */
@@ -1318,7 +1323,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for cmp (eq ne)
(simplify
(cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
- (if ((~get_nonzero_bits (@0) & @1) != 0)
+ (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
{ constant_boolean_node (cmp == NE_EXPR, type); })))
/* ((X inner_op C0) outer_op C1)
@@ -1350,18 +1355,18 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
if (inner_op == BIT_XOR_EXPR)
{
- C0 = wi::bit_and_not (@0, @1);
- cst_emit = wi::bit_or (C0, @1);
+ C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
+ cst_emit = C0 | wi::to_wide (@1);
}
else
{
- C0 = @0;
- cst_emit = wi::bit_xor (@0, @1);
+ C0 = wi::to_wide (@0);
+ cst_emit = C0 ^ wi::to_wide (@1);
}
}
- (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
+ (if (!fail && (C0 & zero_mask_not) == 0)
(outer_op @2 { wide_int_to_tree (type, cst_emit); })
- (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
+ (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
(inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
@@ -1394,7 +1399,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
... = ptr & ~algn; */
(simplify
(pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
- (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
+ (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
(bit_and @0 { algn; })))
/* Try folding difference of addresses. */
@@ -1424,8 +1429,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
unsigned HOST_WIDE_INT bitpos;
get_pointer_alignment_1 (@0, &align, &bitpos);
}
- (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
- { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
+ (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
+ { wide_int_to_tree (type, (wi::to_wide (@1)
+ & (bitpos / BITS_PER_UNIT))); }))))
/* We can't reassociate at all for saturating types. */
@@ -1535,8 +1541,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(inner_op @0 { cst; } )
/* X+INT_MAX+1 is X-INT_MIN. */
(if (INTEGRAL_TYPE_P (type) && cst
- && wi::eq_p (cst, wi::min_value (type)))
- (neg_inner_op @0 { wide_int_to_tree (type, cst); })
+ && wi::to_wide (cst) == wi::min_value (type))
+ (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
/* Last resort, use some unsigned type. */
(with { tree utype = unsigned_type_for (type); }
(view_convert (inner_op
@@ -1788,16 +1794,20 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for cmp (eq ne)
(simplify
(cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
- (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
(for cmp (eq ne)
(simplify
(cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
- (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
(for minmax (min min max max min min max max )
@@ -1824,7 +1834,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Optimize (x >> c) << c into x & (-1<<c). */
(simplify
(lshift (rshift @0 INTEGER_CST@1) @1)
- (if (wi::ltu_p (@1, element_precision (type)))
+ (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
(bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
@@ -1832,7 +1842,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(rshift (lshift @0 INTEGER_CST@1) @1)
(if (TYPE_UNSIGNED (type)
- && (wi::ltu_p (@1, element_precision (type))))
+ && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
(bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
(for shiftrotate (lrotate rrotate lshift rshift)
@@ -1879,10 +1889,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(op (op @0 INTEGER_CST@1) INTEGER_CST@2)
(with { unsigned int prec = element_precision (type); }
- (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
- && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
- && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
- && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
+ (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
+ && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
(with { unsigned int low = (tree_to_uhwi (@1)
+ tree_to_uhwi (@2)); }
/* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
@@ -1910,13 +1920,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for cmp (ne eq)
(simplify
(cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
- (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
+ (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
(if (cand < 0
|| (!integer_zerop (@2)
- && wi::ne_p (wi::lshift (@0, cand), @2)))
+ && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
{ constant_boolean_node (cmp == NE_EXPR, type); }
(if (!integer_zerop (@2)
- && wi::eq_p (wi::lshift (@0, cand), @2))
+ && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
(cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
@@ -2454,7 +2464,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
{
bool overflow = false;
enum tree_code code, cmp_code = cmp;
- wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
+ wide_int real_c1;
+ wide_int c1 = wi::to_wide (@1);
+ wide_int c2 = wi::to_wide (@2);
+ wide_int c3 = wi::to_wide (@3);
signop sgn = TYPE_SIGN (from_type);
/* Handle special case A), given x of unsigned type:
@@ -2592,13 +2605,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(cmp @0 INTEGER_CST@1)
(if (tree_int_cst_sgn (@1) == -1)
- (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
(for cmp (ge lt)
acmp (gt le)
(simplify
(cmp @0 INTEGER_CST@1)
(if (tree_int_cst_sgn (@1) == 1)
- (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
/* We can simplify a logical negation of a comparison to the
@@ -2998,13 +3011,14 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(cmp (exact_div @0 @1) INTEGER_CST@2)
(if (!integer_zerop (@1))
- (if (wi::eq_p (@2, 0))
+ (if (wi::to_wide (@2) == 0)
(cmp @0 @2)
(if (TREE_CODE (@1) == INTEGER_CST)
(with
{
bool ovf;
- wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
+ wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
+ TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
(if (ovf)
{ constant_boolean_node (cmp == NE_EXPR, type); }
@@ -3012,14 +3026,16 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for cmp (lt le gt ge)
(simplify
(cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
+ (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
(with
{
bool ovf;
- wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
+ wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
+ TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
(if (ovf)
- { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
+ { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
+ TYPE_SIGN (TREE_TYPE (@2)))
!= (cmp == LT_EXPR || cmp == LE_EXPR), type); }
(cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
@@ -3191,7 +3207,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
(if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
- && wi::bit_and_not (@1, @2) != 0)
+ && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
{ constant_boolean_node (cmp == NE_EXPR, type); }))
/* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
@@ -3231,7 +3247,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(ne (bit_and @0 integer_pow2p@1) integer_zerop)
integer_pow2p@2 integer_zerop)
(with {
- int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
+ int shift = (wi::exact_log2 (wi::to_wide (@2))
+ - wi::exact_log2 (wi::to_wide (@1)));
}
(if (shift > 0)
(bit_and
@@ -3248,7 +3265,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& type_has_mode_precision_p (TREE_TYPE (@0))
&& element_precision (@2) >= element_precision (@0)
- && wi::only_sign_bit_p (@1, element_precision (@0)))
+ && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
(ncmp (convert:stype @0) { build_zero_cst (stype); })))))
@@ -3260,7 +3277,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
integer_pow2p@1 integer_zerop)
(if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
(with {
- int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
+ int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
}
(if (shift >= 0)
(bit_and
@@ -3381,7 +3398,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
wide_int min = wi::min_value (arg1_type);
}
(switch
- (if (wi::eq_p (@1, max))
+ (if (wi::to_wide (@1) == max)
(switch
(if (cmp == GT_EXPR)
{ constant_boolean_node (false, type); })
@@ -3391,7 +3408,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
{ constant_boolean_node (true, type); })
(if (cmp == LT_EXPR)
(ne @2 @1))))
- (if (wi::eq_p (@1, min))
+ (if (wi::to_wide (@1) == min)
(switch
(if (cmp == LT_EXPR)
{ constant_boolean_node (false, type); })
@@ -3401,19 +3418,19 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
{ constant_boolean_node (true, type); })
(if (cmp == GT_EXPR)
(ne @2 @1))))
- (if (wi::eq_p (@1, max - 1))
+ (if (wi::to_wide (@1) == max - 1)
(switch
(if (cmp == GT_EXPR)
- (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
(if (cmp == LE_EXPR)
- (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
- (if (wi::eq_p (@1, min + 1))
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
+ (if (wi::to_wide (@1) == min + 1)
(switch
(if (cmp == GE_EXPR)
- (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
(if (cmp == LT_EXPR)
- (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
- (if (wi::eq_p (@1, signed_max)
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
+ (if (wi::to_wide (@1) == signed_max
&& TYPE_UNSIGNED (arg1_type)
/* We will flip the signedness of the comparison operator
associated with the mode of @1, so the sign bit is
@@ -3469,10 +3486,12 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(cmp:c (plus@2 @0 INTEGER_CST@1) @0)
(if (TYPE_UNSIGNED (TREE_TYPE (@0))
&& TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
- && wi::ne_p (@1, 0)
+ && wi::to_wide (@1) != 0
&& single_use (@2))
- (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
- (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
+ (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
+ (out @0 { wide_int_to_tree (TREE_TYPE (@0),
+ wi::max_value (prec, UNSIGNED)
+ - wi::to_wide (@1)); })))))
/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
@@ -4034,13 +4053,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(POWI @0 INTEGER_CST@1)
(switch
/* powi(x,0) -> 1. */
- (if (wi::eq_p (@1, 0))
+ (if (wi::to_wide (@1) == 0)
{ build_real (type, dconst1); })
/* powi(x,1) -> x. */
- (if (wi::eq_p (@1, 1))
+ (if (wi::to_wide (@1) == 1)
@0)
/* powi(x,-1) -> 1/x. */
- (if (wi::eq_p (@1, -1))
+ (if (wi::to_wide (@1) == -1)
(rdiv { build_real (type, dconst1); } @0))))
/* Narrowing of arithmetic and logical operations.
@@ -4105,8 +4124,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& types_match (@0, @1)
&& (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
<= TYPE_PRECISION (TREE_TYPE (@0)))
- && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
- true, TYPE_PRECISION (type))) == 0))
+ && (wi::to_wide (@4)
+ & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
+ true, TYPE_PRECISION (type))) == 0)
(if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
(with { tree ntype = TREE_TYPE (@0); }
(convert (bit_and (op @0 @1) (convert:ntype @4))))
@@ -4166,7 +4186,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
WARN_STRICT_OVERFLOW_CONDITIONAL);
bool less = cmp == LE_EXPR || cmp == LT_EXPR;
/* wi::ges_p (@2, 0) should be sufficient for a signed type. */
- bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
+ bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
+ TYPE_SIGN (TREE_TYPE (@1)))
!= (op == MINUS_EXPR);
constant_boolean_node (less == ovf_high, type);
}
@@ -4292,10 +4313,14 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
}
(switch
- (if (wi::leu_p (@ipos, @rpos)
- && wi::leu_p (wi::add (@rpos, @rsize), wi::add (@ipos, isize)))
+ (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
+ && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
+ wi::to_wide (@ipos) + isize))
(BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
- wi::sub (@rpos, @ipos)); }))
- (if (wi::geu_p (@ipos, wi::add (@rpos, @rsize))
- || wi::geu_p (@rpos, wi::add (@ipos, isize)))
+ wi::to_wide (@rpos)
+ - wi::to_wide (@ipos)); }))
+ (if (wi::geu_p (wi::to_wide (@ipos),
+ wi::to_wide (@rpos) + wi::to_wide (@rsize))
+ || wi::geu_p (wi::to_wide (@rpos),
+ wi::to_wide (@ipos) + isize))
(BIT_FIELD_REF @0 @rsize @rpos)))))
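
Throughout the match.pd hunks above, captures such as @1 and @2 are
trees, so each use inside a C-level condition now spells out the access,
e.g. (wi::to_wide (@1) & 1) == 0 to test the low bit of a constant
exponent. Outside the match.pd DSL the same test would look like this
sketch (even_int_cst_p is an illustrative name):

  /* Sketch: does the INTEGER_CST T have its low bit clear?  */
  static bool
  even_int_cst_p (tree t)
  {
    return (wi::to_wide (t) & 1) == 0;
  }
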
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index ddfdb1c8c90..20b0fe44b29 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -1,3 +1,8 @@
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * objc-act.c (objc_decl_method_attributes): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-09-29 Jakub Jelinek <jakub@redhat.com>
* objc-act.c (check_ivars, gen_declaration): For OBJCPLUS look at
diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c
index 5d81af7fbd6..ce2adcc0ded 100644
--- a/gcc/objc/objc-act.c
+++ b/gcc/objc/objc-act.c
@@ -4900,10 +4900,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
number = TREE_VALUE (second_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (second_argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
/* This is the third argument, the "first-to-check",
which specifies the index of the first argument to
@@ -4913,10 +4913,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
number = TREE_VALUE (third_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (third_argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
}
filtered_attributes = chainon (filtered_attributes,
new_attribute);
@@ -4949,10 +4949,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
/* Get the value of the argument and add 2. */
tree number = TREE_VALUE (argument);
if (number && TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
argument = TREE_CHAIN (argument);
}
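
The objc-act.c hunks combine both halves of the idiom: read the constant
with wi::to_wide, adjust it with an ordinary operator, and rebuild a tree
of the original type with wide_int_to_tree. As a sketch (int_cst_plus is
an illustrative name):

  /* Sketch: return NUMBER + N as a tree of NUMBER's type;
     the addition wraps in the native precision.  */
  static tree
  int_cst_plus (tree number, int n)
  {
    return wide_int_to_tree (TREE_TYPE (number),
                             wi::to_wide (number) + n);
  }
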
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 3645661038a..afa758bf499 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -3081,7 +3081,7 @@ scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
if (tem != TREE_TYPE (t))
{
if (TREE_CODE (t) == INTEGER_CST)
- *tp = wide_int_to_tree (tem, t);
+ *tp = wide_int_to_tree (tem, wi::to_wide (t));
else
TREE_TYPE (t) = tem;
}
@@ -6372,14 +6372,14 @@ lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
tree itype = TREE_TYPE (TREE_VALUE (vec));
if (POINTER_TYPE_P (itype))
itype = sizetype;
- wide_int offset = wide_int::from (TREE_PURPOSE (vec),
+ wide_int offset = wide_int::from (wi::to_wide (TREE_PURPOSE (vec)),
TYPE_PRECISION (itype),
TYPE_SIGN (itype));
/* Ignore invalid offsets that are not multiples of the step. */
- if (!wi::multiple_of_p
- (wi::abs (offset), wi::abs ((wide_int) fd.loops[i].step),
- UNSIGNED))
+ if (!wi::multiple_of_p (wi::abs (offset),
+ wi::abs (wi::to_wide (fd.loops[i].step)),
+ UNSIGNED))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"ignoring sink clause with offset that is not "
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 9497cb4f238..d534c76ee49 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -118,7 +118,7 @@ print_node_brief (FILE *file, const char *prefix, const_tree node, int indent)
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (wi::to_wide (node), file, TYPE_SIGN (TREE_TYPE (node)));
}
if (TREE_CODE (node) == REAL_CST)
{
@@ -721,7 +721,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (wi::to_wide (node), file, TYPE_SIGN (TREE_TYPE (node)));
break;
case REAL_CST:
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 92bd209ad64..410ae61bd4d 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -941,7 +941,7 @@ expand_case (gswitch *stmt)
original type. Make sure to drop overflow flags. */
low = fold_convert (index_type, low);
if (TREE_OVERFLOW (low))
- low = wide_int_to_tree (index_type, low);
+ low = wide_int_to_tree (index_type, wi::to_wide (low));
/* The canonical form of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
@@ -950,7 +950,7 @@ expand_case (gswitch *stmt)
high = low;
high = fold_convert (index_type, high);
if (TREE_OVERFLOW (high))
- high = wide_int_to_tree (index_type, high);
+ high = wide_int_to_tree (index_type, wi::to_wide (high));
case_list.safe_push (simple_case_node (low, high, lab));
}
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 938be6745af..02739b0ed7f 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2362,9 +2362,11 @@ layout_type (tree type)
&& tree_int_cst_lt (ub, lb))
{
lb = wide_int_to_tree (ssizetype,
- offset_int::from (lb, SIGNED));
+ offset_int::from (wi::to_wide (lb),
+ SIGNED));
ub = wide_int_to_tree (ssizetype,
- offset_int::from (ub, SIGNED));
+ offset_int::from (wi::to_wide (ub),
+ SIGNED));
}
length
= fold_convert (sizetype,
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index f7a5f121c9c..47f56bf2b54 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -408,8 +408,8 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
{
if (icode == PLUS_EXPR)
- op1 = wide_int_to_tree (itype, wi::neg (op1));
- if (wi::geu_p (minv, op1))
+ op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
+ if (wi::geu_p (minv, wi::to_wide (op1)))
{
op0 = fold_convert (otype, op0);
op1 = fold_convert (otype, op1);
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 99d1f1e1af8..b5e0460c84a 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -1721,12 +1721,12 @@ group_case_labels_stmt (gswitch *stmt)
{
tree merge_case = gimple_switch_label (stmt, next_index);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- wide_int bhp1 = wi::add (base_high, 1);
+ wide_int bhp1 = wi::to_wide (base_high) + 1;
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && wi::eq_p (CASE_LOW (merge_case), bhp1))
+ && wi::to_wide (CASE_LOW (merge_case)) == bhp1)
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 72cc8de59e8..19cceb8dfd9 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -1060,12 +1060,15 @@ dr_analyze_indices (struct data_reference *dr, loop_p nest, loop_p loop)
if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
&& TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
&& !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
- rem = wi::mod_trunc (off, TYPE_SIZE_UNIT (TREE_TYPE (ref)), SIGNED);
+ rem = wi::mod_trunc
+ (wi::to_wide (off),
+ wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
+ SIGNED);
else
/* If we can't compute the remainder, simply force the initial
condition to zero. */
- rem = off;
- off = wide_int_to_tree (ssizetype, wi::sub (off, rem));
+ rem = wi::to_wide (off);
+ off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
/* And finally replace the initial condition. */
access_fn = chrec_replace_initial_condition
@@ -1485,14 +1488,16 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
std::swap (*dr_a1, *dr_a2);
bool do_remove = false;
- wide_int diff = wi::sub (DR_INIT (dr_a2->dr), DR_INIT (dr_a1->dr));
+ wide_int diff = (wi::to_wide (DR_INIT (dr_a2->dr))
+ - wi::to_wide (DR_INIT (dr_a1->dr)));
wide_int min_seg_len_b;
tree new_seg_len;
if (TREE_CODE (dr_b1->seg_len) == INTEGER_CST)
- min_seg_len_b = wi::abs (dr_b1->seg_len);
+ min_seg_len_b = wi::abs (wi::to_wide (dr_b1->seg_len));
else
- min_seg_len_b = wi::mul (factor, wi::abs (DR_STEP (dr_b1->dr)));
+ min_seg_len_b
+ = factor * wi::abs (wi::to_wide (DR_STEP (dr_b1->dr)));
/* Now we try to merge alias check dr_a1 & dr_b and dr_a2 & dr_b.
@@ -1531,7 +1536,7 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
/* Adjust diff according to access size of both references. */
tree size_a1 = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a1->dr)));
tree size_a2 = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a2->dr)));
- diff = wi::add (diff, wi::sub (size_a2, size_a1));
+ diff += wi::to_wide (size_a2) - wi::to_wide (size_a1);
/* Case A.1. */
if (wi::leu_p (diff, min_seg_len_b)
/* Case A.2 and B combined. */
@@ -1539,11 +1544,12 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
{
if (tree_fits_uhwi_p (dr_a1->seg_len)
&& tree_fits_uhwi_p (dr_a2->seg_len))
- new_seg_len
- = wide_int_to_tree (sizetype,
- wi::umin (wi::sub (dr_a1->seg_len,
- diff),
- dr_a2->seg_len));
+ {
+ wide_int min_len
+ = wi::umin (wi::to_wide (dr_a1->seg_len) - diff,
+ wi::to_wide (dr_a2->seg_len));
+ new_seg_len = wide_int_to_tree (sizetype, min_len);
+ }
else
new_seg_len
= size_binop (MINUS_EXPR, dr_a2->seg_len,
@@ -1562,11 +1568,12 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
{
if (tree_fits_uhwi_p (dr_a1->seg_len)
&& tree_fits_uhwi_p (dr_a2->seg_len))
- new_seg_len
- = wide_int_to_tree (sizetype,
- wi::umax (wi::add (dr_a2->seg_len,
- diff),
- dr_a1->seg_len));
+ {
+ wide_int max_len
+ = wi::umax (wi::to_wide (dr_a2->seg_len) + diff,
+ wi::to_wide (dr_a1->seg_len));
+ new_seg_len = wide_int_to_tree (sizetype, max_len);
+ }
else
new_seg_len
= size_binop (PLUS_EXPR, dr_a2->seg_len,
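
The tree-data-ref.c hunks restructure the umin/umax expressions so that
the tree accesses and the subtraction are explicit, at the cost of a
named temporary. A sketch of the merged segment-length computation
(min_segment_length is an illustrative name; LEN1, LEN2 and DIFF are
assumed to be sizetype-precision values, as in the hunk):

  /* Sketch: min (LEN1 - DIFF, LEN2) as a sizetype tree,
     using unsigned minimum.  */
  static tree
  min_segment_length (tree len1, const wide_int &diff, tree len2)
  {
    wide_int m = wi::umin (wi::to_wide (len1) - diff,
                           wi::to_wide (len2));
    return wide_int_to_tree (sizetype, m);
  }
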
diff --git a/gcc/tree-dump.c b/gcc/tree-dump.c
index da36031da30..ac0c7b868a1 100644
--- a/gcc/tree-dump.c
+++ b/gcc/tree-dump.c
@@ -540,7 +540,7 @@ dequeue_and_dump (dump_info_p di)
case INTEGER_CST:
fprintf (di->stream, "int: ");
- print_decs (t, di->stream);
+ print_decs (wi::to_wide (t), di->stream);
break;
case STRING_CST:
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index a226096504f..c764a44db61 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -949,7 +949,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = wide_int_to_tree (new_type, *tp);
+ *tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
@@ -1133,7 +1133,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = wide_int_to_tree (new_type, *tp);
+ *tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index e7b10cb390f..fdb32f10529 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -1655,7 +1655,8 @@ is_inv_store_elimination_chain (struct loop *loop, chain_p chain)
/* If the loop iterates an unknown number of times or fewer times than chain->length,
we still need to set up the root variable and propagate it with a PHI node. */
tree niters = number_of_latch_executions (loop);
- if (TREE_CODE (niters) != INTEGER_CST || wi::leu_p (niters, chain->length))
+ if (TREE_CODE (niters) != INTEGER_CST
+ || wi::leu_p (wi::to_wide (niters), chain->length))
return false;
/* Check stores in chain for elimination if they only store loop invariant
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 14c7caa60af..4e7bb5fbdbc 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1710,7 +1710,7 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
pp_unsigned_wide_integer (pp, tree_to_uhwi (node));
else
{
- wide_int val = node;
+ wide_int val = wi::to_wide (node);
if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node))))
{
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index cdf940a3874..58c2bde8307 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -3324,7 +3324,7 @@ iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
return false;
if (TREE_CODE (base) == INTEGER_CST)
- base_min = base_max = base;
+ base_min = base_max = wi::to_wide (base);
else if (TREE_CODE (base) == SSA_NAME
&& INTEGRAL_TYPE_P (TREE_TYPE (base))
&& get_range_info (base, &base_min, &base_max) == VR_RANGE)
@@ -3333,7 +3333,7 @@ iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
return true;
if (TREE_CODE (step) == INTEGER_CST)
- step_min = step_max = step;
+ step_min = step_max = wi::to_wide (step);
else if (TREE_CODE (step) == SSA_NAME
&& INTEGRAL_TYPE_P (TREE_TYPE (step))
&& get_range_info (step, &step_min, &step_max) == VR_RANGE)
@@ -3593,7 +3593,8 @@ simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
extreme = wi::max_value (type);
}
overflow = false;
- extreme = wi::sub (extreme, iv->step, TYPE_SIGN (type), &overflow);
+ extreme = wi::sub (extreme, wi::to_wide (iv->step),
+ TYPE_SIGN (type), &overflow);
if (overflow)
return true;
e = fold_build2 (code, boolean_type_node, base,
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 5e354a17ce9..14c743414df 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -197,13 +197,13 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
struct mem_addr_template *templ;
if (addr->step && !integer_onep (addr->step))
- st = immed_wide_int_const (addr->step, pointer_mode);
+ st = immed_wide_int_const (wi::to_wide (addr->step), pointer_mode);
else
st = NULL_RTX;
if (addr->offset && !integer_zerop (addr->offset))
{
- offset_int dc = offset_int::from (addr->offset, SIGNED);
+ offset_int dc = offset_int::from (wi::to_wide (addr->offset), SIGNED);
off = immed_wide_int_const (dc, pointer_mode);
}
else
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index df409af2d83..439bb0a8b40 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -951,8 +951,9 @@ ccp_finalize (bool nonzero_p)
else
{
unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
- wide_int nonzero_bits = wide_int::from (val->mask, precision,
- UNSIGNED) | val->value;
+ wide_int nonzero_bits
+ = (wide_int::from (val->mask, precision, UNSIGNED)
+ | wi::to_wide (val->value));
nonzero_bits &= get_nonzero_bits (name);
set_nonzero_bits (name, nonzero_bits);
}
@@ -1972,9 +1973,10 @@ evaluate_stmt (gimple *stmt)
}
else
{
- if (wi::bit_and_not (val.value, nonzero_bits) != 0)
+ if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
val.value = wide_int_to_tree (TREE_TYPE (lhs),
- nonzero_bits & val.value);
+ nonzero_bits
+ & wi::to_wide (val.value));
if (nonzero_bits == 0)
val.mask = 0;
else
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index bbea619171a..2a71027a1e2 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -2160,8 +2160,8 @@ constant_multiple_of (tree top, tree bot, widest_int *mul)
if (TREE_CODE (bot) != INTEGER_CST)
return false;
- p0 = widest_int::from (top, SIGNED);
- p1 = widest_int::from (bot, SIGNED);
+ p0 = widest_int::from (wi::to_wide (top), SIGNED);
+ p1 = widest_int::from (wi::to_wide (bot), SIGNED);
if (p1 == 0)
return false;
*mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 27244eb27c1..f8f2eeb1a3b 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -92,14 +92,14 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
*var = op0;
/* Always sign extend the offset. */
- wi::to_mpz (op1, offset, SIGNED);
+ wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
if (negate)
mpz_neg (offset, offset);
break;
case INTEGER_CST:
*var = build_int_cst_type (type, 0);
- wi::to_mpz (expr, offset, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
break;
default:
@@ -164,7 +164,7 @@ refine_value_range_using_guard (tree type, tree var,
/* Case of comparing VAR with its below/up bounds. */
mpz_init (valc1);
- wi::to_mpz (c1, valc1, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
if (mpz_cmp (valc1, below) == 0)
cmp = GT_EXPR;
if (mpz_cmp (valc1, up) == 0)
@@ -178,9 +178,9 @@ refine_value_range_using_guard (tree type, tree var,
wide_int min = wi::min_value (type);
wide_int max = wi::max_value (type);
- if (wi::eq_p (c1, min))
+ if (wi::to_wide (c1) == min)
cmp = GT_EXPR;
- if (wi::eq_p (c1, max))
+ if (wi::to_wide (c1) == max)
cmp = LT_EXPR;
}
@@ -221,8 +221,8 @@ refine_value_range_using_guard (tree type, tree var,
/* Setup range information for varc1. */
if (integer_zerop (varc1))
{
- wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
- wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
+ wi::to_mpz (0, minc1, TYPE_SIGN (type));
+ wi::to_mpz (0, maxc1, TYPE_SIGN (type));
}
else if (TREE_CODE (varc1) == SSA_NAME
&& INTEGRAL_TYPE_P (type)
@@ -903,7 +903,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
if (integer_onep (s)
|| (TREE_CODE (c) == INTEGER_CST
&& TREE_CODE (s) == INTEGER_CST
- && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
+ && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
+ TYPE_SIGN (type)) == 0)
|| (TYPE_OVERFLOW_UNDEFINED (type)
&& multiple_of_p (type, c, s)))
{
@@ -922,7 +923,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
+ max = wi::mask <widest_int> (TYPE_PRECISION (type)
+ - wi::ctz (wi::to_wide (s)), false);
wi::to_mpz (max, bnd, UNSIGNED);
return;
}
@@ -938,13 +940,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
/* ... then we can strengthen this to C / S, and possibly we can use
the upper bound on C given by BNDS. */
if (TREE_CODE (c) == INTEGER_CST)
- wi::to_mpz (c, bnd, UNSIGNED);
+ wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
else if (bnds_u_valid)
mpz_set (bnd, bnds->up);
}
mpz_init (d);
- wi::to_mpz (s, d, UNSIGNED);
+ wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
mpz_fdiv_q (bnd, bnd, d);
mpz_clear (d);
}
@@ -1157,7 +1159,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
tmod = fold_convert (type1, mod);
mpz_init (mmod);
- wi::to_mpz (mod, mmod, UNSIGNED);
+ wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
mpz_neg (mmod, mmod);
/* If the induction variable does not overflow and the exit is taken,
@@ -1543,7 +1545,7 @@ number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
mpz_init (mstep);
mpz_init (tmp);
- wi::to_mpz (step, mstep, UNSIGNED);
+ wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
@@ -3178,7 +3180,7 @@ get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
if (is_min == tree_int_cst_sign_bit (iv.step))
return false;
- *init = iv.base;
+ *init = wi::to_wide (iv.base);
return true;
}
@@ -3225,7 +3227,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
&& INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
&& (get_range_info (orig_base, &min, &max) == VR_RANGE
|| get_cst_init_from_scev (orig_base, &max, false))
- && wi::gts_p (high, max))
+ && wi::gts_p (wi::to_wide (high), max))
base = wide_int_to_tree (unsigned_type, max);
else if (TREE_CODE (base) != INTEGER_CST
&& dominated_by_p (CDI_DOMINATORS,
@@ -3243,7 +3245,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
&& INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
&& (get_range_info (orig_base, &min, &max) == VR_RANGE
|| get_cst_init_from_scev (orig_base, &min, true))
- && wi::gts_p (min, low))
+ && wi::gts_p (min, wi::to_wide (low)))
base = wide_int_to_tree (unsigned_type, min);
else if (TREE_CODE (base) != INTEGER_CST
&& dominated_by_p (CDI_DOMINATORS,
@@ -4499,19 +4501,15 @@ scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
MIN - type_MIN >= |step| ; if step < 0.
Or VAR must take a value outside of its value range, which is not true. */
- step_wi = step;
+ step_wi = wi::to_wide (step);
type = TREE_TYPE (var);
if (tree_int_cst_sign_bit (step))
{
- diff = lower_bound_in_type (type, type);
- diff = minv - diff;
+ diff = minv - wi::to_wide (lower_bound_in_type (type, type));
step_wi = - step_wi;
}
else
- {
- diff = upper_bound_in_type (type, type);
- diff = diff - maxv;
- }
+ diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
return (wi::geu_p (diff, step_wi));
}
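
tree-ssa-loop-niter.c mostly feeds constants into GMP; wi::to_mpz now
takes an explicit wide_int view of the tree (and, where the hunk above
replaced integer_zero_node, a plain 0). Sketch (cst_to_mpz is an
illustrative name):

  /* Sketch: copy the INTEGER_CST T into M, using T's own sign.  */
  static void
  cst_to_mpz (tree t, mpz_t m)
  {
    wi::to_mpz (wi::to_wide (t), m, TYPE_SIGN (TREE_TYPE (t)));
  }
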
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index f5c07dc27f1..05fbb31b0ee 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -1123,7 +1123,8 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
if (cmp == LT_EXPR)
{
bool overflow;
- wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
+ wide_int alt = wi::sub (wi::to_wide (larger), 1,
+ TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
if (! overflow)
alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
@@ -1131,7 +1132,8 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
else
{
bool overflow;
- wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
+ wide_int alt = wi::add (wi::to_wide (larger), 1,
+ TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
if (! overflow)
alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
@@ -1149,7 +1151,8 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
if (cmp == GT_EXPR)
{
bool overflow;
- wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
+ wide_int alt = wi::add (wi::to_wide (smaller), 1,
+ TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
if (! overflow)
alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
@@ -1157,7 +1160,8 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
else
{
bool overflow;
- wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
+ wide_int alt = wi::sub (wi::to_wide (smaller), 1,
+ TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
if (! overflow)
alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
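
All four phiopt hunks follow one overflow-checked pattern; a sketch,
reusing the names from the hunks above, with the result folded back to
a tree only when the arithmetic stayed inside the type:

  bool overflow;
  wide_int alt = wi::sub (wi::to_wide (larger), 1,
                          TYPE_SIGN (TREE_TYPE (larger)), &overflow);
  tree alt_larger = NULL_TREE;
  if (!overflow)
    alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
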
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 364272ddf0e..5eb47a9d6d5 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -4020,21 +4020,25 @@ compute_avail (void)
{
ref->set = set;
if (ref1->opcode == MEM_REF)
- ref1->op0 = wide_int_to_tree (TREE_TYPE (ref2->op0),
- ref1->op0);
+ ref1->op0
+ = wide_int_to_tree (TREE_TYPE (ref2->op0),
+ wi::to_wide (ref1->op0));
else
- ref1->op2 = wide_int_to_tree (TREE_TYPE (ref2->op2),
- ref1->op2);
+ ref1->op2
+ = wide_int_to_tree (TREE_TYPE (ref2->op2),
+ wi::to_wide (ref1->op2));
}
else
{
ref->set = 0;
if (ref1->opcode == MEM_REF)
- ref1->op0 = wide_int_to_tree (ptr_type_node,
- ref1->op0);
+ ref1->op0
+ = wide_int_to_tree (ptr_type_node,
+ wi::to_wide (ref1->op0));
else
- ref1->op2 = wide_int_to_tree (ptr_type_node,
- ref1->op2);
+ ref1->op2
+ = wide_int_to_tree (ptr_type_node,
+ wi::to_wide (ref1->op2));
}
operands.release ();
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 59397495abf..d27bcee8262 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1167,7 +1167,7 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops,
gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF);
if (addr_base != TREE_OPERAND (op->op0, 0))
{
- offset_int off = offset_int::from (mem_op->op0, SIGNED);
+ offset_int off = offset_int::from (wi::to_wide (mem_op->op0), SIGNED);
off += addr_offset;
mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
op->op0 = build_fold_addr_expr (addr_base);
@@ -1202,7 +1202,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
&& code != POINTER_PLUS_EXPR)
return false;
- off = offset_int::from (mem_op->op0, SIGNED);
+ off = offset_int::from (wi::to_wide (mem_op->op0), SIGNED);
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
@@ -1235,8 +1235,9 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
&& tem[tem.length () - 2].opcode == MEM_REF)
{
vn_reference_op_t new_mem_op = &tem[tem.length () - 2];
- new_mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0),
- new_mem_op->op0);
+ new_mem_op->op0
+ = wide_int_to_tree (TREE_TYPE (mem_op->op0),
+ wi::to_wide (new_mem_op->op0));
}
else
gcc_assert (tem.last ().opcode == STRING_CST);
@@ -3537,7 +3538,7 @@ valueized_wider_op (tree wide_type, tree op)
/* For constants simply extend it. */
if (TREE_CODE (op) == INTEGER_CST)
- return wide_int_to_tree (wide_type, op);
+ return wide_int_to_tree (wide_type, wi::to_wide (op));
return NULL_TREE;
}
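
One conversion in the sccvn hunks is worth spelling out: offset_int::from
widens a native-precision value to offset_int with an explicit choice of
extension. A sketch, with names following the hunks above (OP0 an
INTEGER_CST MEM_REF offset, ADDR_OFFSET an offset_int):

  offset_int off = offset_int::from (wi::to_wide (op0), SIGNED);
  off += addr_offset;  /* offset_int arithmetic for address math */
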
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 2cca970f1e5..407ad3759b3 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3098,7 +3098,7 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
else
{
/* Sign-extend the offset. */
- offset_int soffset = offset_int::from (offset, SIGNED);
+ offset_int soffset = offset_int::from (wi::to_wide (offset), SIGNED);
if (!wi::fits_shwi_p (soffset))
rhsoffset = UNKNOWN_OFFSET;
else
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index 67f0d840bf5..4096ded7ea2 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -1474,8 +1474,8 @@ is_pred_expr_subset_of (pred_info expr1, pred_info expr2)
code2 = invert_tree_comparison (code2, false);
if ((code1 == EQ_EXPR || code1 == BIT_AND_EXPR) && code2 == BIT_AND_EXPR)
- return wi::eq_p (expr1.pred_rhs,
- wi::bit_and (expr1.pred_rhs, expr2.pred_rhs));
+ return (wi::to_wide (expr1.pred_rhs)
+ == (wi::to_wide (expr1.pred_rhs) & wi::to_wide (expr2.pred_rhs)));
if (code1 != code2 && code2 != NE_EXPR)
return false;
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index 5c96075a05e..6d344ad5309 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -454,8 +454,8 @@ set_nonzero_bits (tree name, const wide_int_ref &mask)
if (mask == -1)
return;
set_range_info_raw (name, VR_RANGE,
- TYPE_MIN_VALUE (TREE_TYPE (name)),
- TYPE_MAX_VALUE (TREE_TYPE (name)));
+ wi::to_wide (TYPE_MIN_VALUE (TREE_TYPE (name))),
+ wi::to_wide (TYPE_MAX_VALUE (TREE_TYPE (name))));
}
range_info_def *ri = SSA_NAME_RANGE_INFO (name);
ri->set_nonzero_bits (mask);
@@ -468,7 +468,7 @@ wide_int
get_nonzero_bits (const_tree name)
{
if (TREE_CODE (name) == INTEGER_CST)
- return name;
+ return wi::to_wide (name);
/* Use element_precision instead of TYPE_PRECISION so complex and
vector types get a non-zero precision. */
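
Range bounds now cross the set_range_info interface as wide_ints, so
tree-level type bounds go through the accessor at each caller. A sketch,
assuming an SSA_NAME NAME with an integral type:

  tree type = TREE_TYPE (name);
  set_range_info (name, VR_RANGE,
                  wi::to_wide (TYPE_MIN_VALUE (type)),
                  wi::to_wide (TYPE_MAX_VALUE (type)));
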
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 19f0a73fbe2..dc9fc84c6a0 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -655,8 +655,7 @@ collect_switch_conv_info (gswitch *swtch, struct switch_conv_info *info)
for (i = 2; i < branch_num; i++)
{
tree elt = gimple_switch_label (swtch, i);
- wide_int w = last;
- if (w + 1 != CASE_LOW (elt))
+ if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
{
info->contiguous_range = false;
break;
@@ -1065,7 +1064,7 @@ array_value_type (gswitch *swtch, tree type, int num,
if (TREE_CODE (elt->value) != INTEGER_CST)
return type;
- cst = elt->value;
+ cst = wi::to_wide (elt->value);
while (1)
{
unsigned int prec = GET_MODE_BITSIZE (mode);
@@ -1778,11 +1777,12 @@ dump_case_nodes (FILE *f, case_node *root, int indent_step, int indent_level)
fputs (";; ", f);
fprintf (f, "%*s", indent_step * indent_level, "");
- print_dec (root->low, f, TYPE_SIGN (TREE_TYPE (root->low)));
+ print_dec (wi::to_wide (root->low), f, TYPE_SIGN (TREE_TYPE (root->low)));
if (!tree_int_cst_equal (root->low, root->high))
{
fprintf (f, " ... ");
- print_dec (root->high, f, TYPE_SIGN (TREE_TYPE (root->high)));
+ print_dec (wi::to_wide (root->high), f,
+ TYPE_SIGN (TREE_TYPE (root->high)));
}
fputs ("\n", f);
@@ -2113,7 +2113,7 @@ try_switch_expansion (gswitch *stmt)
original type. Make sure to drop overflow flags. */
low = fold_convert (index_type, low);
if (TREE_OVERFLOW (low))
- low = wide_int_to_tree (index_type, low);
+ low = wide_int_to_tree (index_type, wi::to_wide (low));
/* The canonical from of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
@@ -2122,7 +2122,7 @@ try_switch_expansion (gswitch *stmt)
high = low;
high = fold_convert (index_type, high);
if (TREE_OVERFLOW (high))
- high = wide_int_to_tree (index_type, high);
+ high = wide_int_to_tree (index_type, wi::to_wide (high));
basic_block case_bb = label_to_block_fn (cfun, lab);
edge case_edge = find_edge (bb, case_bb);
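
Round-tripping a constant through wide_int_to_tree (type, wi::to_wide (x))
rebuilds it as a fresh shared node of the same value and precision, which
drops TREE_OVERFLOW; the hunk above relies on exactly that. A sketch with
the names used there:

  low = fold_convert (index_type, low);
  if (TREE_OVERFLOW (low))
    low = wide_int_to_tree (index_type, wi::to_wide (low));
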
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index d6085337147..910334f664e 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1232,9 +1232,11 @@ vect_gen_vector_loop_niters (loop_vec_info loop_vinfo, tree niters,
/* Peeling algorithm guarantees that vector loop bound is at least ONE,
we set range information to make niters analyzer's life easier. */
if (stmts != NULL)
- set_range_info (niters_vector, VR_RANGE, build_int_cst (type, 1),
- fold_build2 (RSHIFT_EXPR, type,
- TYPE_MAX_VALUE (type), log_vf));
+ set_range_info (niters_vector, VR_RANGE,
+ wi::to_wide (build_int_cst (type, 1)),
+ wi::to_wide (fold_build2 (RSHIFT_EXPR, type,
+ TYPE_MAX_VALUE (type),
+ log_vf)));
}
*niters_vector_ptr = niters_vector;
@@ -1787,7 +1789,8 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
least VF, so set range information for newly generated var. */
if (new_var_p)
set_range_info (niters, VR_RANGE,
- build_int_cst (type, vf), TYPE_MAX_VALUE (type));
+ wi::to_wide (build_int_cst (type, vf)),
+ wi::to_wide (TYPE_MAX_VALUE (type)));
/* Prolog iterates at most bound_prolog times, latch iterates at
most bound_prolog - 1 times. */
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index ed0879eddf9..e4051b68dd0 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -3714,7 +3714,7 @@ vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
vectorized matches the vector type of the result in
size and number of elements. */
unsigned prec
- = wi::udiv_trunc (TYPE_SIZE (vectype),
+ = wi::udiv_trunc (wi::to_wide (TYPE_SIZE (vectype)),
TYPE_VECTOR_SUBPARTS (vectype)).to_uhwi ();
tree type
= build_nonstandard_integer_type (prec,
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index f986b753c83..d3fa5164238 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -7712,11 +7712,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (group_gap_adj != 0 && ! slp_perm
&& group_elt == group_size - group_gap_adj)
{
- bool ovf;
- tree bump
- = wide_int_to_tree (sizetype,
- wi::smul (TYPE_SIZE_UNIT (elem_type),
- group_gap_adj, &ovf));
+ wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
+ tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
group_elt = 0;
@@ -7726,11 +7724,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
elements loaded for a permuted SLP load. */
if (group_gap_adj != 0 && slp_perm)
{
- bool ovf;
- tree bump
- = wide_int_to_tree (sizetype,
- wi::smul (TYPE_SIZE_UNIT (elem_type),
- group_gap_adj, &ovf));
+ wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
+ tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
}
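
Because wi::to_wide yields an operand the C++ operators accept, the two
vectorizer hunks above can replace wi::smul plus an unused overflow flag
with plain multiplication; a sketch using the same names:

  wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
                       * group_gap_adj);
  tree bump = wide_int_to_tree (sizetype, bump_val);
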
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 3e8be2688fb..2c86b8e5c91 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1072,7 +1072,8 @@ compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
if (!inv2)
inv2 = build_int_cst (TREE_TYPE (val2), 0);
- return wi::cmp (inv1, inv2, TYPE_SIGN (TREE_TYPE (val1)));
+ return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
+ TYPE_SIGN (TREE_TYPE (val1)));
}
const bool cst1 = is_gimple_min_invariant (val1);
@@ -1099,10 +1100,11 @@ compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
/* Compute the difference between the constants. If it overflows or
underflows, this means that we can trivially compare the NAME with
it and, consequently, the two values with each other. */
- wide_int diff = wi::sub (cst, inv);
- if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
+ wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
+ if (wi::cmp (0, wi::to_wide (inv), sgn)
+ != wi::cmp (diff, wi::to_wide (cst), sgn))
{
- const int res = wi::cmp (cst, inv, sgn);
+ const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
return cst1 ? res : -res;
}
@@ -1635,14 +1637,15 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = wi::rshift (val1, wval2, sign);
+ res = wi::rshift (wi::to_wide (val1), wval2, sign);
else
- res = wi::lshift (val1, wval2);
+ res = wi::lshift (wi::to_wide (val1), wval2);
break;
}
case MULT_EXPR:
- res = wi::mul (val1, val2, sign, &overflow);
+ res = wi::mul (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case TRUNC_DIV_EXPR:
@@ -1653,7 +1656,8 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
return res;
}
else
- res = wi::div_trunc (val1, val2, sign, &overflow);
+ res = wi::div_trunc (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case FLOOR_DIV_EXPR:
@@ -1662,7 +1666,8 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
*overflow_p = true;
return res;
}
- res = wi::div_floor (val1, val2, sign, &overflow);
+ res = wi::div_floor (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case CEIL_DIV_EXPR:
@@ -1671,7 +1676,8 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
*overflow_p = true;
return res;
}
- res = wi::div_ceil (val1, val2, sign, &overflow);
+ res = wi::div_ceil (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case ROUND_DIV_EXPR:
@@ -1680,7 +1686,8 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
*overflow_p = 0;
return res;
}
- res = wi::div_round (val1, val2, sign, &overflow);
+ res = wi::div_round (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
default:
@@ -1755,15 +1762,15 @@ zero_nonzero_bits_from_vr (const tree expr_type,
if (range_int_cst_singleton_p (vr))
{
- *may_be_nonzero = vr->min;
+ *may_be_nonzero = wi::to_wide (vr->min);
*must_be_nonzero = *may_be_nonzero;
}
else if (tree_int_cst_sgn (vr->min) >= 0
|| tree_int_cst_sgn (vr->max) < 0)
{
- wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
- *may_be_nonzero = wi::bit_or (vr->min, vr->max);
- *must_be_nonzero = wi::bit_and (vr->min, vr->max);
+ wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
+ *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
+ *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
if (xor_mask != 0)
{
wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
@@ -1801,12 +1808,12 @@ ranges_from_anti_range (value_range *ar,
{
vr0->type = VR_RANGE;
vr0->min = vrp_val_min (type);
- vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
+ vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
}
if (!vrp_val_is_max (ar->max))
{
vr1->type = VR_RANGE;
- vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
+ vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
vr1->max = vrp_val_max (type);
}
if (vr0->type == VR_UNDEFINED)
@@ -2171,8 +2178,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
}
else
{
- type_min = vrp_val_min (expr_type);
- type_max = vrp_val_max (expr_type);
+ type_min = wi::to_wide (vrp_val_min (expr_type));
+ type_max = wi::to_wide (vrp_val_max (expr_type));
}
/* Combine the lower bounds, if any. */
@@ -2180,39 +2187,42 @@ extract_range_from_binary_expr_1 (value_range *vr,
{
if (minus_p)
{
- wmin = wi::sub (min_op0, min_op1);
+ wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);
/* Check for overflow. */
- if (wi::cmp (0, min_op1, sgn)
- != wi::cmp (wmin, min_op0, sgn))
- min_ovf = wi::cmp (min_op0, min_op1, sgn);
+ if (wi::cmp (0, wi::to_wide (min_op1), sgn)
+ != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
+ min_ovf = wi::cmp (wi::to_wide (min_op0),
+ wi::to_wide (min_op1), sgn);
}
else
{
- wmin = wi::add (min_op0, min_op1);
+ wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);
/* Check for overflow. */
- if (wi::cmp (min_op1, 0, sgn)
- != wi::cmp (wmin, min_op0, sgn))
- min_ovf = wi::cmp (min_op0, wmin, sgn);
+ if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
+ != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
+ min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
}
}
else if (min_op0)
- wmin = min_op0;
+ wmin = wi::to_wide (min_op0);
else if (min_op1)
{
if (minus_p)
{
- wmin = wi::neg (min_op1);
+ wmin = -wi::to_wide (min_op1);
/* Check for overflow. */
- if (sgn == SIGNED && wi::neg_p (min_op1) && wi::neg_p (wmin))
+ if (sgn == SIGNED
+ && wi::neg_p (wi::to_wide (min_op1))
+ && wi::neg_p (wmin))
min_ovf = 1;
- else if (sgn == UNSIGNED && wi::ne_p (min_op1, 0))
+ else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
min_ovf = -1;
}
else
- wmin = min_op1;
+ wmin = wi::to_wide (min_op1);
}
else
wmin = wi::shwi (0, prec);
@@ -2222,38 +2232,41 @@ extract_range_from_binary_expr_1 (value_range *vr,
{
if (minus_p)
{
- wmax = wi::sub (max_op0, max_op1);
+ wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);
/* Check for overflow. */
- if (wi::cmp (0, max_op1, sgn)
- != wi::cmp (wmax, max_op0, sgn))
- max_ovf = wi::cmp (max_op0, max_op1, sgn);
+ if (wi::cmp (0, wi::to_wide (max_op1), sgn)
+ != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
+ max_ovf = wi::cmp (wi::to_wide (max_op0),
+ wi::to_wide (max_op1), sgn);
}
else
{
- wmax = wi::add (max_op0, max_op1);
+ wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);
- if (wi::cmp (max_op1, 0, sgn)
- != wi::cmp (wmax, max_op0, sgn))
- max_ovf = wi::cmp (max_op0, wmax, sgn);
+ if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
+ != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
+ max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
}
}
else if (max_op0)
- wmax = max_op0;
+ wmax = wi::to_wide (max_op0);
else if (max_op1)
{
if (minus_p)
{
- wmax = wi::neg (max_op1);
+ wmax = -wi::to_wide (max_op1);
/* Check for overflow. */
- if (sgn == SIGNED && wi::neg_p (max_op1) && wi::neg_p (wmax))
+ if (sgn == SIGNED
+ && wi::neg_p (wi::to_wide (max_op1))
+ && wi::neg_p (wmax))
max_ovf = 1;
- else if (sgn == UNSIGNED && wi::ne_p (max_op1, 0))
+ else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
max_ovf = -1;
}
else
- wmax = max_op1;
+ wmax = wi::to_wide (max_op1);
}
else
wmax = wi::shwi (0, prec);
@@ -2628,14 +2641,14 @@ extract_range_from_binary_expr_1 (value_range *vr,
{
low_bound = bound;
high_bound = complement;
- if (wi::ltu_p (vr0.max, low_bound))
+ if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
{
/* [5, 6] << [1, 2] == [10, 24]. */
/* We're shifting out only zeroes, the value increases
monotonically. */
in_bounds = true;
}
- else if (wi::ltu_p (high_bound, vr0.min))
+ else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
{
/* [0xffffff00, 0xffffffff] << [1, 2]
== [0xfffffc00, 0xfffffffe]. */
@@ -2649,8 +2662,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
/* [-1, 1] << [1, 2] == [-4, 4]. */
low_bound = complement;
high_bound = bound;
- if (wi::lts_p (vr0.max, high_bound)
- && wi::lts_p (low_bound, vr0.min))
+ if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
+ && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
{
/* For non-negative numbers, we're shifting out only
zeroes, the value increases monotonically.
@@ -2793,14 +2806,12 @@ extract_range_from_binary_expr_1 (value_range *vr,
signop sgn = TYPE_SIGN (expr_type);
unsigned int prec = TYPE_PRECISION (expr_type);
wide_int wmin, wmax, tmp;
- wide_int zero = wi::zero (prec);
- wide_int one = wi::one (prec);
if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
{
- wmax = wi::sub (vr1.max, one);
+ wmax = wi::to_wide (vr1.max) - 1;
if (sgn == SIGNED)
{
- tmp = wi::sub (wi::minus_one (prec), vr1.min);
+ tmp = -1 - wi::to_wide (vr1.min);
wmax = wi::smax (wmax, tmp);
}
}
@@ -2809,28 +2820,28 @@ extract_range_from_binary_expr_1 (value_range *vr,
wmax = wi::max_value (prec, sgn);
/* X % INT_MIN may be INT_MAX. */
if (sgn == UNSIGNED)
- wmax = wmax - one;
+ wmax = wmax - 1;
}
if (sgn == UNSIGNED)
- wmin = zero;
+ wmin = wi::zero (prec);
else
{
wmin = -wmax;
if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
{
- tmp = vr0.min;
- if (wi::gts_p (tmp, zero))
- tmp = zero;
+ tmp = wi::to_wide (vr0.min);
+ if (wi::gts_p (tmp, 0))
+ tmp = wi::zero (prec);
wmin = wi::smax (wmin, tmp);
}
}
if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
{
- tmp = vr0.max;
+ tmp = wi::to_wide (vr0.max);
if (sgn == SIGNED && wi::neg_p (tmp))
- tmp = zero;
+ tmp = wi::zero (prec);
wmax = wi::min (wmax, tmp, sgn);
}
@@ -2875,7 +2886,7 @@ extract_range_from_binary_expr_1 (value_range *vr,
range. */
if (vr0p && range_int_cst_p (vr0p))
{
- wide_int w = vr1p->min;
+ wide_int w = wi::to_wide (vr1p->min);
int m = 0, n = 0;
if (code == BIT_IOR_EXPR)
w = ~w;
@@ -2891,7 +2902,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
m = wi::ctz (w) - n;
}
wide_int mask = wi::mask (m + n, true, w.get_precision ());
- if (wi::eq_p (mask & vr0p->min, mask & vr0p->max))
+ if ((mask & wi::to_wide (vr0p->min))
+ == (mask & wi::to_wide (vr0p->max)))
{
min = int_const_binop (code, vr0p->min, vr1p->min);
max = int_const_binop (code, vr0p->max, vr1p->min);
@@ -2914,16 +2926,20 @@ extract_range_from_binary_expr_1 (value_range *vr,
&& tree_int_cst_sgn (vr0.max) < 0
&& tree_int_cst_sgn (vr1.max) < 0)
{
- wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
- wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr0.max),
+ TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr1.max),
+ TYPE_SIGN (expr_type));
}
/* If either input range contains only non-negative values
we can truncate the result range maximum to the respective
maximum of the input range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
- wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr0.max),
+ TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
- wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr1.max),
+ TYPE_SIGN (expr_type));
max = wide_int_to_tree (expr_type, wmax);
cmp = compare_values (min, max);
/* PR68217: In case of signed & sign-bit-CST should
@@ -2936,10 +2952,10 @@ extract_range_from_binary_expr_1 (value_range *vr,
if (!TYPE_UNSIGNED (expr_type)
&& ((int_cst_range0
&& value_range_constant_singleton (&vr0)
- && !wi::cmps (vr0.min, sign_bit))
+ && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
|| (int_cst_range1
&& value_range_constant_singleton (&vr1)
- && !wi::cmps (vr1.min, sign_bit))))
+ && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
{
min = TYPE_MIN_VALUE (expr_type);
max = build_int_cst (expr_type, 0);
@@ -2958,16 +2974,20 @@ extract_range_from_binary_expr_1 (value_range *vr,
&& tree_int_cst_sgn (vr0.min) >= 0
&& tree_int_cst_sgn (vr1.min) >= 0)
{
- wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
- wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr0.min),
+ TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr1.min),
+ TYPE_SIGN (expr_type));
}
/* If either input range contains only negative values
we can truncate the minimum of the result range to the
respective minimum range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
- wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr0.min),
+ TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
- wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr1.min),
+ TYPE_SIGN (expr_type));
min = wide_int_to_tree (expr_type, wmin);
}
else if (code == BIT_XOR_EXPR)
@@ -4044,7 +4064,7 @@ adjust_range_with_scev (value_range *vr, struct loop *loop,
if (!overflow
&& wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
&& (sgn == UNSIGNED
- || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
+ || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
{
tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
@@ -4966,9 +4986,9 @@ overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
tree inc = gimple_assign_rhs2 (op1_def);
if (reversed)
- *new_cst = wide_int_to_tree (type, max + inc);
+ *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
else
- *new_cst = wide_int_to_tree (type, max - inc);
+ *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
return true;
}
}
@@ -5290,15 +5310,15 @@ register_edge_assert_for_2 (tree name, edge e,
wide_int minval
= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
new_val = val2;
- if (minval == new_val)
+ if (minval == wi::to_wide (new_val))
new_val = NULL_TREE;
}
else
{
wide_int maxval
= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- mask |= val2;
- if (mask == maxval)
+ mask |= wi::to_wide (val2);
+ if (wi::eq_p (mask, maxval))
new_val = NULL_TREE;
else
new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
@@ -5373,8 +5393,8 @@ register_edge_assert_for_2 (tree name, edge e,
bool valid_p = false, valn, cst2n;
enum tree_code ccode = comp_code;
- valv = wide_int::from (val, nprec, UNSIGNED);
- cst2v = wide_int::from (cst2, nprec, UNSIGNED);
+ valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
+ cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
/* If CST2 doesn't have most significant bit set,
@@ -5671,9 +5691,10 @@ is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
return false;
- wide_int mask = maskt;
+ wi::tree_to_wide_ref mask = wi::to_wide (maskt);
wide_int inv_mask = ~mask;
- wide_int val = valt; // Assume VALT is INTEGER_CST
+ /* Assume VALT is INTEGER_CST. */
+ wi::tree_to_wide_ref val = wi::to_wide (valt);
if ((inv_mask & (inv_mask + 1)) != 0
|| (val & mask) != val)
@@ -6022,7 +6043,8 @@ find_switch_asserts (basic_block bb, gswitch *last)
next_min = CASE_LOW (next_cl);
next_max = CASE_HIGH (next_cl);
- wide_int difference = wi::sub (next_min, max ? max : min);
+ wide_int difference = (wi::to_wide (next_min)
+ - wi::to_wide (max ? max : min));
if (wi::eq_p (difference, 1))
max = next_max ? next_max : next_min;
else
@@ -6953,7 +6975,8 @@ maybe_set_nonzero_bits (basic_block bb, tree var)
return;
}
cst = gimple_assign_rhs2 (stmt);
- set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
+ set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+ wi::to_wide (cst)));
}
/* Convert range assertion expressions into the implied copies and
@@ -7547,7 +7570,7 @@ vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
- else if (wi::eq_p (x, max - 1))
+ else if (wi::to_wide (x) == max - 1)
{
op0 = op1;
op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
@@ -8658,7 +8681,7 @@ intersect_ranges (enum value_range_type *vr0type,
== TYPE_PRECISION (ptr_type_node))
&& TREE_CODE (vr1max) == INTEGER_CST
&& TREE_CODE (vr1min) == INTEGER_CST
- && (wi::clz (wi::sub (vr1max, vr1min))
+ && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
< TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
;
/* Else choose the range. */
@@ -9679,7 +9702,8 @@ range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
a signed wide_int, while a negative value cannot be represented
by an unsigned wide_int. */
if (src_sgn != dest_sgn
- && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
+ && (wi::lts_p (wi::to_wide (vr->min), 0)
+ || wi::lts_p (wi::to_wide (vr->max), 0)))
return false;
/* Then we can perform the conversion on both ends and compare
@@ -10275,7 +10299,7 @@ two_valued_val_range_p (tree var, tree *a, tree *b)
return false;
if (vr->type == VR_RANGE
- && wi::sub (vr->max, vr->min) == 1)
+ && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
{
*a = vr->min;
*b = vr->max;
@@ -10284,8 +10308,10 @@ two_valued_val_range_p (tree var, tree *a, tree *b)
/* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
if (vr->type == VR_ANTI_RANGE
- && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
- && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
+ && (wi::to_wide (vr->min)
+ - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
+ && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
+ - wi::to_wide (vr->max)) == 1)
{
*a = vrp_val_min (TREE_TYPE (var));
*b = vrp_val_max (TREE_TYPE (var));
@@ -10850,8 +10876,9 @@ vrp_finalize (bool warn_array_bounds_p)
vr_value[i]->max) == 1)))
set_ptr_nonnull (name);
else if (!POINTER_TYPE_P (TREE_TYPE (name)))
- set_range_info (name, vr_value[i]->type, vr_value[i]->min,
- vr_value[i]->max);
+ set_range_info (name, vr_value[i]->type,
+ wi::to_wide (vr_value[i]->min),
+ wi::to_wide (vr_value[i]->max));
}
substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
@@ -11047,8 +11074,9 @@ evrp_dom_walker::before_dom_children (basic_block bb)
|| vr_result.type == VR_ANTI_RANGE)
&& (TREE_CODE (vr_result.min) == INTEGER_CST)
&& (TREE_CODE (vr_result.max) == INTEGER_CST))
- set_range_info (lhs,
- vr_result.type, vr_result.min, vr_result.max);
+ set_range_info (lhs, vr_result.type,
+ wi::to_wide (vr_result.min),
+ wi::to_wide (vr_result.max));
}
else if (POINTER_TYPE_P (TREE_TYPE (lhs))
&& ((vr_result.type == VR_RANGE
@@ -11121,7 +11149,9 @@ evrp_dom_walker::before_dom_children (basic_block bb)
|| vr.type == VR_ANTI_RANGE)
&& (TREE_CODE (vr.min) == INTEGER_CST)
&& (TREE_CODE (vr.max) == INTEGER_CST))
- set_range_info (output, vr.type, vr.min, vr.max);
+ set_range_info (output, vr.type,
+ wi::to_wide (vr.min),
+ wi::to_wide (vr.max));
}
else if (POINTER_TYPE_P (TREE_TYPE (output))
&& ((vr.type == VR_RANGE
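
A recurring rewrite in the VRP hunks: named helpers such as wi::bit_ior,
wi::bit_and and wi::sub on raw trees become C++ operators on the
accessor's result, both operands now being unambiguously native-precision
wide_ints. A sketch, assuming a value_range *VR with INTEGER_CST bounds:

  wide_int may_be  = wi::to_wide (vr->min) | wi::to_wide (vr->max);
  wide_int must_be = wi::to_wide (vr->min) & wi::to_wide (vr->max);
  wide_int width   = wi::to_wide (vr->max) - wi::to_wide (vr->min);
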
diff --git a/gcc/tree.c b/gcc/tree.c
index e379940f35d..a43177b6a39 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1584,7 +1584,7 @@ cache_integer_cst (tree t)
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (wi::ltu_p (t, 2))
+ if (wi::ltu_p (wi::to_wide (t), 2))
ix = TREE_INT_CST_ELT (t, 0);
break;
@@ -1603,7 +1603,7 @@ cache_integer_cst (tree t)
if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
ix = tree_to_uhwi (t);
}
- else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
ix = tree_to_uhwi (t);
}
else
@@ -1613,14 +1613,14 @@ cache_integer_cst (tree t)
if (integer_minus_onep (t))
ix = 0;
- else if (!wi::neg_p (t))
+ else if (!wi::neg_p (wi::to_wide (t)))
{
if (prec < HOST_BITS_PER_WIDE_INT)
{
if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
ix = tree_to_shwi (t) + 1;
}
- else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
ix = tree_to_shwi (t) + 1;
}
}
@@ -1652,7 +1652,7 @@ cache_integer_cst (tree t)
/* If there is already an entry for the number verify it's the
same. */
if (*slot)
- gcc_assert (wi::eq_p (tree (*slot), t));
+ gcc_assert (wi::to_wide (tree (*slot)) == wi::to_wide (t));
else
/* Otherwise insert this one into the hash table. */
*slot = t;
@@ -1969,7 +1969,7 @@ real_value_from_int_cst (const_tree type, const_tree i)
bitwise comparisons to see if two values are the same. */
memset (&d, 0, sizeof d);
- real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, i,
+ real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, wi::to_wide (i),
TYPE_SIGN (TREE_TYPE (i)));
return d;
}
@@ -2331,7 +2331,7 @@ integer_zerop (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return wi::eq_p (expr, 0);
+ return wi::to_wide (expr) == 0;
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
@@ -2410,7 +2410,8 @@ integer_all_onesp (const_tree expr)
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
+ return (wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED)
+ == wi::to_wide (expr));
}
/* Return 1 if EXPR is the integer constant minus one. */
@@ -2439,7 +2440,7 @@ integer_pow2p (const_tree expr)
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wi::popcount (expr) == 1;
+ return wi::popcount (wi::to_wide (expr)) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
@@ -2449,7 +2450,7 @@ int
integer_nonzerop (const_tree expr)
{
return ((TREE_CODE (expr) == INTEGER_CST
- && !wi::eq_p (expr, 0))
+ && wi::to_wide (expr) != 0)
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
@@ -2485,7 +2486,7 @@ tree_log2 (const_tree expr)
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wi::exact_log2 (expr);
+ return wi::exact_log2 (wi::to_wide (expr));
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
@@ -2497,7 +2498,7 @@ tree_floor_log2 (const_tree expr)
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wi::floor_log2 (expr);
+ return wi::floor_log2 (wi::to_wide (expr));
}
/* Return number of known trailing zero bits in EXPR, or, if the value of
@@ -2514,7 +2515,7 @@ tree_ctz (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- ret1 = wi::ctz (expr);
+ ret1 = wi::ctz (wi::to_wide (expr));
return MIN (ret1, prec);
case SSA_NAME:
ret1 = wi::ctz (get_nonzero_bits (expr));
@@ -4679,7 +4680,7 @@ build_simple_mem_ref_loc (location_t loc, tree ptr)
offset_int
mem_ref_offset (const_tree t)
{
- return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
+ return offset_int::from (wi::to_wide (TREE_OPERAND (t, 1)), SIGNED);
}
/* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
@@ -6614,7 +6615,7 @@ tree_int_cst_sign_bit (const_tree t)
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- return wi::extract_uhwi (t, bitno, 1);
+ return wi::extract_uhwi (wi::to_wide (t), bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
@@ -6624,11 +6625,11 @@ tree_int_cst_sign_bit (const_tree t)
int
tree_int_cst_sgn (const_tree t)
{
- if (wi::eq_p (t, 0))
+ if (wi::to_wide (t) == 0)
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (wi::neg_p (t))
+ else if (wi::neg_p (wi::to_wide (t)))
return -1;
else
return 1;
@@ -8291,7 +8292,7 @@ get_unwidened (tree op, tree for_type)
if (TREE_CODE (win) == INTEGER_CST)
{
tree wtype = TREE_TYPE (win);
- unsigned prec = wi::min_precision (win, TYPE_SIGN (wtype));
+ unsigned prec = wi::min_precision (wi::to_wide (win), TYPE_SIGN (wtype));
if (for_type)
prec = MAX (prec, final_prec);
if (prec < TYPE_PRECISION (wtype))
@@ -8412,7 +8413,7 @@ int_fits_type_p (const_tree c, const_tree type)
/* Non-standard boolean types can have arbitrary precision but various
transformations assume that they can only take values 0 and +/-1. */
if (TREE_CODE (type) == BOOLEAN_TYPE)
- return wi::fits_to_boolean_p (c, type);
+ return wi::fits_to_boolean_p (wi::to_wide (c), type);
retry:
type_low_bound = TYPE_MIN_VALUE (type);
@@ -8455,7 +8456,7 @@ retry:
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types, */
- if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (wi::to_wide (c)))
return false;
/* Second, narrower types always fit in wider ones. */
@@ -8474,10 +8475,10 @@ retry:
possible that the value will not fit. The test below
fails if any bit is set between the sign bit of the
underlying mode and the top bit of the type. */
- if (wi::ne_p (wi::zext (c, prec - 1), c))
+ if (wi::zext (wi::to_wide (c), prec - 1) != wi::to_wide (c))
return false;
}
- else if (wi::neg_p (c))
+ else if (wi::neg_p (wi::to_wide (c)))
return false;
}
@@ -8493,7 +8494,7 @@ retry:
}
/* Or to fits_to_tree_p, if nothing else. */
- return wi::fits_to_tree_p (c, type);
+ return wi::fits_to_tree_p (wi::to_wide (c), type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
@@ -8506,7 +8507,7 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (TYPE_MIN_VALUE (type)), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
@@ -8520,7 +8521,7 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (TYPE_MAX_VALUE (type)), max, TYPE_SIGN (type));
else
{
wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
@@ -10931,7 +10932,7 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1)
tree
num_ending_zeros (const_tree x)
{
- return build_int_cst (TREE_TYPE (x), wi::ctz (x));
+ return build_int_cst (TREE_TYPE (x), wi::ctz (wi::to_wide (x)));
}
@@ -12355,7 +12356,7 @@ drop_tree_overflow (tree t)
/* For tree codes with a sharing machinery re-build the result. */
if (TREE_CODE (t) == INTEGER_CST)
- return wide_int_to_tree (TREE_TYPE (t), t);
+ return wide_int_to_tree (TREE_TYPE (t), wi::to_wide (t));
/* Otherwise, as all tcc_constants are possibly shared, copy the node
and drop the flag. */
@@ -13528,7 +13529,7 @@ get_range_pos_neg (tree arg)
int cnt = 0;
if (TREE_CODE (arg) == INTEGER_CST)
{
- wide_int w = wi::sext (arg, prec);
+ wide_int w = wi::sext (wi::to_wide (arg), prec);
if (wi::neg_p (w))
return 2;
else
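
The tree.c predicates show the accessor at its simplest: read the
constant once, then compare or query it directly. A sketch, assuming an
INTEGER_CST EXPR (the ... stands for whatever the caller does):

  if (wi::to_wide (expr) == 0)                  /* integer_zerop-style test */
    ...
  if (wi::popcount (wi::to_wide (expr)) == 1)   /* as in integer_pow2p */
    ...
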
diff --git a/gcc/tree.h b/gcc/tree.h
index 5e8419e259a..7c2d2e7eb02 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5120,20 +5120,6 @@ extern bool anon_aggrname_p (const_tree);
/* The tree and const_tree overload templates. */
namespace wi
{
- template <>
- struct int_traits <const_tree>
- {
- static const enum precision_type precision_type = VAR_PRECISION;
- static const bool host_dependent_precision = false;
- static const bool is_sign_extended = false;
- static unsigned int get_precision (const_tree);
- static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
- const_tree);
- };
-
- template <>
- struct int_traits <tree> : public int_traits <const_tree> {};
-
template <int N>
class extended_tree
{
@@ -5157,42 +5143,115 @@ namespace wi
static const unsigned int precision = N;
};
- generic_wide_int <extended_tree <WIDE_INT_MAX_PRECISION> >
- to_widest (const_tree);
-
- generic_wide_int <extended_tree <ADDR_MAX_PRECISION> > to_offset (const_tree);
+ typedef const generic_wide_int <extended_tree <WIDE_INT_MAX_PRECISION> >
+ tree_to_widest_ref;
+ typedef const generic_wide_int <extended_tree <ADDR_MAX_PRECISION> >
+ tree_to_offset_ref;
+ typedef const generic_wide_int<wide_int_ref_storage<false, false> >
+ tree_to_wide_ref;
+ tree_to_widest_ref to_widest (const_tree);
+ tree_to_offset_ref to_offset (const_tree);
+ tree_to_wide_ref to_wide (const_tree);
wide_int to_wide (const_tree, unsigned int);
}
-inline unsigned int
-wi::int_traits <const_tree>::get_precision (const_tree tcst)
-{
- return TYPE_PRECISION (TREE_TYPE (tcst));
-}
+/* Refer to INTEGER_CST T as though it were a widest_int.
-/* Convert the tree_cst X into a wide_int of PRECISION. */
-inline wi::storage_ref
-wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *,
- unsigned int precision, const_tree x)
-{
- gcc_checking_assert (precision == TYPE_PRECISION (TREE_TYPE (x)));
- return wi::storage_ref (&TREE_INT_CST_ELT (x, 0), TREE_INT_CST_NUNITS (x),
- precision);
-}
+ This function gives T's actual numerical value, influenced by the
+ signedness of its type. For example, a signed byte with just the
+ top bit set would be -128 while an unsigned byte with the same
+ bit pattern would be 128.
+
+ This is the right choice when operating on groups of INTEGER_CSTs
+ that might have different signedness or precision. It is also the
+ right choice in code that specifically needs an approximation of
+ infinite-precision arithmetic instead of normal modulo arithmetic.
+
+ The approximation of infinite precision is good enough for realistic
+ numbers of additions and subtractions of INTEGER_CSTs (where
+ "realistic" includes any number less than 1 << 31) but it cannot
+ represent the result of multiplying the two largest supported
+ INTEGER_CSTs. The overflow-checking form of wi::mul provides a way
+ of multiplying two arbitrary INTEGER_CSTs and checking that the
+ result is representable as a widest_int.
+
+ Note that any overflow checking done on these values is relative to
+ the range of widest_int rather than the range of a TREE_TYPE.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_widest_ref wt = wi::to_widest (t);
+
+ instead of:
-inline generic_wide_int <wi::extended_tree <WIDE_INT_MAX_PRECISION> >
+ widest_int wt = wi::to_widest (t). */
+
+inline wi::tree_to_widest_ref
wi::to_widest (const_tree t)
{
return t;
}
-inline generic_wide_int <wi::extended_tree <ADDR_MAX_PRECISION> >
+/* Refer to INTEGER_CST T as though it were an offset_int.
+
+ This function is an optimisation of wi::to_widest for cases
+ in which T is known to be a bit or byte count in the range
+ (-(2 ^ (N + BITS_PER_UNIT)), 2 ^ (N + BITS_PER_UNIT)), where N is
+ the target's address size in bits.
+
+ This is the right choice when operating on bit or byte counts as
+ untyped numbers rather than M-bit values. The wi::to_widest comments
+ about addition, subtraction and multiplication apply here: sequences
+ of 1 << 31 additions and subtractions do not induce overflow, but
+ multiplying the largest sizes might. Again,
+
+ wi::tree_to_offset_ref wt = wi::to_offset (t);
+
+ is more efficient than:
+
+ offset_int wt = wi::to_offset (t). */
+
+inline wi::tree_to_offset_ref
wi::to_offset (const_tree t)
{
return t;
}
+/* Refer to INTEGER_CST T as though it were a wide_int.
+
+ In contrast to the approximation of infinite-precision numbers given
+ by wi::to_widest and wi::to_offset, this function treats T as a
+ signless collection of N bits, where N is the precision of T's type.
+ As with machine registers, signedness is determined by the operation
+ rather than the operands; for example, there is a distinction between
+ signed and unsigned division.
+
+ This is the right choice when operating on values with the same type
+ using normal modulo arithmetic. The overflow-checking forms of things
+ like wi::add check whether the result can be represented in T's type.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_wide_ref wt = wi::to_wide (t);
+
+ instead of:
+
+ wide_int wt = wi::to_wide (t). */
+
+inline wi::tree_to_wide_ref
+wi::to_wide (const_tree t)
+{
+ return wi::storage_ref (&TREE_INT_CST_ELT (t, 0), TREE_INT_CST_NUNITS (t),
+ TYPE_PRECISION (TREE_TYPE (t)));
+}
+
/* Convert INTEGER_CST T to a wide_int of precision PREC, extending or
truncating as necessary. When extending, use sign extension if T's
type is signed and zero extension if T's type is unsigned. */
@@ -5200,7 +5259,7 @@ wi::to_offset (const_tree t)
inline wide_int
wi::to_wide (const_tree t, unsigned int prec)
{
- return wide_int::from (t, prec, TYPE_SIGN (TREE_TYPE (t)));
+ return wide_int::from (wi::to_wide (t), prec, TYPE_SIGN (TREE_TYPE (t)));
}
template <int N>
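
Taken together, the new commentary amounts to a short decision rule,
summarised in this sketch (A and B are INTEGER_CSTs; the first line
additionally assumes they have the same type):

  wide_int w = wi::to_wide (a) + wi::to_wide (b);
                                /* modulo arithmetic in the type */
  widest_int x = wi::to_widest (a) + wi::to_widest (b);
                                /* mixed types or signedness, approximating
                                   infinite precision */
  offset_int o = wi::to_offset (a);  /* bit and byte counts only */
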
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 1030168e6b7..6c4fe0e77a0 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -1164,8 +1164,8 @@ ubsan_expand_ptr_ifn (gimple_stmt_iterator *gsip)
unlink_stmt_vdef (stmt);
if (TREE_CODE (off) == INTEGER_CST)
- g = gimple_build_cond (wi::neg_p (off) ? LT_EXPR : GE_EXPR, ptri,
- fold_build1 (NEGATE_EXPR, sizetype, off),
+ g = gimple_build_cond (wi::neg_p (wi::to_wide (off)) ? LT_EXPR : GE_EXPR,
+ ptri, fold_build1 (NEGATE_EXPR, sizetype, off),
NULL_TREE, NULL_TREE);
else if (pos_neg != 3)
g = gimple_build_cond (pos_neg == 1 ? LT_EXPR : GT_EXPR,
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 56bc5345ba4..e17b016af04 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -150,15 +150,23 @@ along with GCC; see the file COPYING3. If not see
and in wider precisions.
There are constructors to create the various forms of wide_int from
- trees, rtl and constants. For trees you can simply say:
+ trees, rtl and constants. For trees the options are:
tree t = ...;
- wide_int x = t;
+ wi::to_wide (t) // Treat T as a wide_int
+ wi::to_offset (t) // Treat T as an offset_int
+ wi::to_widest (t) // Treat T as a widest_int
- However, a little more syntax is required for rtl constants since
- they do not have an explicit precision. To make an rtl into a
- wide_int, you have to pair it with a mode. The canonical way to do
- this is with rtx_mode_t as in:
+ All three are light-weight accessors that should have no overhead
+ in release builds. If it is useful for readability reasons to
+ store the result in a temporary variable, the preferred method is:
+
+ wi::tree_to_wide_ref twide = wi::to_wide (t);
+ wi::tree_to_offset_ref toffset = wi::to_offset (t);
+ wi::tree_to_widest_ref twidest = wi::to_widest (t);
+
+ To make an rtx into a wide_int, you have to pair it with a mode.
+ The canonical way to do this is with rtx_mode_t as in:
rtx r = ...
wide_int x = rtx_mode_t (r, mode);
@@ -175,23 +183,22 @@ along with GCC; see the file COPYING3. If not see
offset_int x = (int) c; // sign-extend C
widest_int x = (unsigned int) c; // zero-extend C
- It is also possible to do arithmetic directly on trees, rtxes and
+ It is also possible to do arithmetic directly on rtx_mode_ts and
constants. For example:
- wi::add (t1, t2); // add equal-sized INTEGER_CSTs t1 and t2
- wi::add (t1, 1); // add 1 to INTEGER_CST t1
- wi::add (r1, r2); // add equal-sized rtx constants r1 and r2
+ wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2
+ wi::add (r1, 1); // add 1 to rtx_mode_t r1
wi::lshift (1, 100); // 1 << 100 as a widest_int
Many binary operations place restrictions on the combinations of inputs,
using the following rules:
- - {tree, rtx, wide_int} op {tree, rtx, wide_int} -> wide_int
+ - {rtx, wide_int} op {rtx, wide_int} -> wide_int
The inputs must be the same precision. The result is a wide_int
of the same precision
- - {tree, rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
- (un)signed HOST_WIDE_INT op {tree, rtx, wide_int} -> wide_int
+ - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
+ (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
The HOST_WIDE_INT is extended or truncated to the precision of
the other input. The result is a wide_int of the same precision
as that input.
@@ -316,7 +323,9 @@ typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
-template <bool SE>
+/* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+template <bool SE, bool HDP = true>
struct wide_int_ref_storage;
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
@@ -330,7 +339,8 @@ typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
to use those. */
#define WIDE_INT_REF_FOR(T) \
generic_wide_int \
- <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended> >
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
+ wi::int_traits <T>::host_dependent_precision> >
namespace wi
{
@@ -929,7 +939,7 @@ decompose (HOST_WIDE_INT *, unsigned int precision,
/* Provide the storage for a wide_int_ref. This acts like a read-only
wide_int, with the optimization that VAL is normally a pointer to
another integer's storage, so that no array copy is needed. */
-template <bool SE>
+template <bool SE, bool HDP>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
@@ -948,8 +958,8 @@ public:
};
/* Create a reference from an existing reference. */
-template <bool SE>
-inline wide_int_ref_storage <SE>::
+template <bool SE, bool HDP>
+inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
: storage_ref (x)
{}
@@ -957,32 +967,30 @@ wide_int_ref_storage (const wi::storage_ref &x)
/* Create a reference to integer X in its natural precision. Note
that the natural precision is host-dependent for primitive
types. */
-template <bool SE>
+template <bool SE, bool HDP>
template <typename T>
-inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x)
+inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
: storage_ref (wi::int_traits <T>::decompose (scratch,
wi::get_precision (x), x))
{
}
/* Create a reference to integer X in precision PRECISION. */
-template <bool SE>
+template <bool SE, bool HDP>
template <typename T>
-inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x,
- unsigned int precision)
+inline wide_int_ref_storage <SE, HDP>::
+wide_int_ref_storage (const T &x, unsigned int precision)
: storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}
namespace wi
{
- template <bool SE>
- struct int_traits <wide_int_ref_storage <SE> >
+ template <bool SE, bool HDP>
+ struct int_traits <wide_int_ref_storage <SE, HDP> >
{
static const enum precision_type precision_type = VAR_PRECISION;
- /* wi::storage_ref can be a reference to a primitive type,
- so this is the conservatively-correct setting. */
- static const bool host_dependent_precision = true;
+ static const bool host_dependent_precision = HDP;
static const bool is_sign_extended = SE;
};
}
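
The net effect of the new HDP parameter, as a sketch: primitive integers
keep the conservative default (host-dependent precision), while the tree
accessor's reference type opts out, because an INTEGER_CST's precision
always comes from its type. Assuming an INTEGER_CST t:

  wide_int_ref r1 = 1;          /* wide_int_ref_storage <false>, HDP true */
  wi::tree_to_wide_ref r2 = wi::to_wide (t);
                                /* wide_int_ref_storage <false, false> */
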